author     Matt A. Tobin <email@mattatobin.com>    2020-04-07 23:30:51 -0400
committer  Matt A. Tobin <email@mattatobin.com>    2020-04-07 23:30:51 -0400
commit     5545a8983ff0ef1fb52e64aef8e66fa9b13c1cbb (patch)
tree       45d55e3e5e73c4255c4d71258d9be5b2d004d28f /media/libaom/src/third_party
parent     50f1986697a7412e4160976fa5e11217b4ef1f44 (diff)
Move aom source to a sub-directory under media/libaom
There is no damned reason to treat this differently than any other media lib given its license and there never was.
Diffstat (limited to 'media/libaom/src/third_party')
-rw-r--r--  media/libaom/src/third_party/fastfeat/LICENSE | 30
-rw-r--r--  media/libaom/src/third_party/fastfeat/README.libvpx | 39
-rw-r--r--  media/libaom/src/third_party/fastfeat/fast.c | 22
-rw-r--r--  media/libaom/src/third_party/fastfeat/fast.h | 20
-rw-r--r--  media/libaom/src/third_party/fastfeat/fast_9.c | 5911
-rw-r--r--  media/libaom/src/third_party/fastfeat/nonmax.c | 121
-rw-r--r--  media/libaom/src/third_party/googletest/README.libaom | 26
-rw-r--r--  media/libaom/src/third_party/googletest/gtest.mk | 1
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/CHANGES | 157
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/CMakeLists.txt | 286
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/CONTRIBUTORS | 37
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/LICENSE | 28
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/README.md | 280
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/cmake/internal_utils.cmake | 254
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-death-test.h | 294
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-message.h | 250
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-param-test.h | 1444
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-param-test.h.pump | 510
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-printers.h | 993
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-spi.h | 232
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-test-part.h | 179
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-typed-test.h | 263
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest.h | 2236
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest_pred_impl.h | 358
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest_prod.h | 58
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest-port.h | 69
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest-printers.h | 42
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest.h | 41
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-death-test-internal.h | 319
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-filepath.h | 206
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-internal.h | 1238
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-linked_ptr.h | 243
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util-generated.h | 5146
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util-generated.h.pump | 286
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util.h | 731
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-port-arch.h | 93
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-port.h | 2567
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-string.h | 167
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-tuple.h | 1020
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-tuple.h.pump | 347
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-type-util.h | 3331
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-type-util.h.pump | 297
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest-all.cc | 48
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest-death-test.cc | 1342
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest-filepath.cc | 387
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest-internal-inl.h | 1183
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest-port.cc | 1259
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest-printers.cc | 373
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest-test-part.cc | 110
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest-typed-test.cc | 118
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest.cc | 5389
-rw-r--r--  media/libaom/src/third_party/googletest/src/googletest/src/gtest_main.cc | 38
-rw-r--r--  media/libaom/src/third_party/libwebm/AUTHORS.TXT | 4
-rw-r--r--  media/libaom/src/third_party/libwebm/Android.mk | 17
-rw-r--r--  media/libaom/src/third_party/libwebm/LICENSE.TXT | 30
-rw-r--r--  media/libaom/src/third_party/libwebm/PATENTS.TXT | 23
-rw-r--r--  media/libaom/src/third_party/libwebm/README.libaom | 22
-rw-r--r--  media/libaom/src/third_party/libwebm/common/file_util.cc | 93
-rw-r--r--  media/libaom/src/third_party/libwebm/common/file_util.h | 44
-rw-r--r--  media/libaom/src/third_party/libwebm/common/hdr_util.cc | 220
-rw-r--r--  media/libaom/src/third_party/libwebm/common/hdr_util.h | 71
-rw-r--r--  media/libaom/src/third_party/libwebm/common/webmids.h | 192
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxer.cc | 4194
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxer.h | 1922
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxertypes.h | 28
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc | 744
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxerutil.h | 112
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvmuxer/mkvwriter.cc | 90
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvmuxer/mkvwriter.h | 51
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvparser/mkvparser.cc | 8049
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvparser/mkvparser.h | 1145
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvparser/mkvreader.cc | 133
-rw-r--r--  media/libaom/src/third_party/libwebm/mkvparser/mkvreader.h | 45
-rw-r--r--  media/libaom/src/third_party/libyuv/README.libaom | 15
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/basic_types.h | 119
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/compare.h | 79
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/convert.h | 246
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/convert_argb.h | 232
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/convert_from.h | 182
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/convert_from_argb.h | 191
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/cpu_id.h | 82
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/mjpeg_decoder.h | 193
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/planar_functions.h | 454
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/rotate.h | 118
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/rotate_argb.h | 34
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/rotate_row.h | 139
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/row.h | 1857
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/scale.h | 104
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/scale_argb.h | 58
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/scale_row.h | 479
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/version.h | 17
-rw-r--r--  media/libaom/src/third_party/libyuv/include/libyuv/video_common.h | 183
-rw-r--r--  media/libaom/src/third_party/libyuv/source/compare.cc | 373
-rw-r--r--  media/libaom/src/third_party/libyuv/source/compare_common.cc | 42
-rw-r--r--  media/libaom/src/third_party/libyuv/source/compare_gcc.cc | 152
-rw-r--r--  media/libaom/src/third_party/libyuv/source/compare_neon.cc | 65
-rw-r--r--  media/libaom/src/third_party/libyuv/source/compare_neon64.cc | 63
-rw-r--r--  media/libaom/src/third_party/libyuv/source/compare_win.cc | 229
-rw-r--r--  media/libaom/src/third_party/libyuv/source/convert.cc | 1389
-rw-r--r--  media/libaom/src/third_party/libyuv/source/convert_argb.cc | 1155
-rw-r--r--  media/libaom/src/third_party/libyuv/source/convert_from.cc | 1348
-rw-r--r--  media/libaom/src/third_party/libyuv/source/convert_from_argb.cc | 1301
-rw-r--r--  media/libaom/src/third_party/libyuv/source/convert_jpeg.cc | 392
-rw-r--r--  media/libaom/src/third_party/libyuv/source/convert_to_argb.cc | 306
-rw-r--r--  media/libaom/src/third_party/libyuv/source/convert_to_i420.cc | 339
-rw-r--r--  media/libaom/src/third_party/libyuv/source/cpu_id.cc | 307
-rw-r--r--  media/libaom/src/third_party/libyuv/source/mjpeg_decoder.cc | 572
-rw-r--r--  media/libaom/src/third_party/libyuv/source/mjpeg_validate.cc | 101
-rw-r--r--  media/libaom/src/third_party/libyuv/source/planar_functions.cc | 2555
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate.cc | 496
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate_any.cc | 55
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate_argb.cc | 205
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate_common.cc | 92
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate_gcc.cc | 493
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate_mips.cc | 484
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate_neon.cc | 535
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate_neon64.cc | 543
-rw-r--r--  media/libaom/src/third_party/libyuv/source/rotate_win.cc | 248
-rw-r--r--  media/libaom/src/third_party/libyuv/source/row_any.cc | 680
-rw-r--r--  media/libaom/src/third_party/libyuv/source/row_common.cc | 2576
-rw-r--r--  media/libaom/src/third_party/libyuv/source/row_gcc.cc | 5475
-rw-r--r--  media/libaom/src/third_party/libyuv/source/row_mips.cc | 911
-rw-r--r--  media/libaom/src/third_party/libyuv/source/row_neon.cc | 3084
-rw-r--r--  media/libaom/src/third_party/libyuv/source/row_neon64.cc | 3087
-rw-r--r--  media/libaom/src/third_party/libyuv/source/row_win.cc | 6331
-rw-r--r--  media/libaom/src/third_party/libyuv/source/row_x86.asm | 146
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale.cc | 1689
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale_any.cc | 200
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale_argb.cc | 853
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale_common.cc | 1137
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale_gcc.cc | 1089
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale_mips.cc | 654
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale_neon.cc | 1037
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale_neon64.cc | 1042
-rw-r--r--  media/libaom/src/third_party/libyuv/source/scale_win.cc | 1354
-rw-r--r--  media/libaom/src/third_party/libyuv/source/video_common.cc | 64
-rw-r--r--  media/libaom/src/third_party/libyuv/source/x86inc.asm | 1136
-rw-r--r--  media/libaom/src/third_party/vector/LICENSE | 19
-rw-r--r--  media/libaom/src/third_party/vector/README.libaom | 14
-rw-r--r--  media/libaom/src/third_party/vector/vector.c | 543
-rw-r--r--  media/libaom/src/third_party/vector/vector.h | 159
-rw-r--r--  media/libaom/src/third_party/x86inc/LICENSE | 18
-rw-r--r--  media/libaom/src/third_party/x86inc/README.libaom | 20
-rw-r--r--  media/libaom/src/third_party/x86inc/x86inc.asm | 1649
144 files changed, 111237 insertions, 0 deletions
diff --git a/media/libaom/src/third_party/fastfeat/LICENSE b/media/libaom/src/third_party/fastfeat/LICENSE
new file mode 100644
index 000000000..f347008d6
--- /dev/null
+++ b/media/libaom/src/third_party/fastfeat/LICENSE
@@ -0,0 +1,30 @@
+Copyright (c) 2006, 2008 Edward Rosten
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+
+ *Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ *Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ *Neither the name of the University of Cambridge nor the names of
+ its contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/media/libaom/src/third_party/fastfeat/README.libvpx b/media/libaom/src/third_party/fastfeat/README.libvpx
new file mode 100644
index 000000000..1e58a303b
--- /dev/null
+++ b/media/libaom/src/third_party/fastfeat/README.libvpx
@@ -0,0 +1,39 @@
+URL: https://github.com/edrosten/fast-C-src
+Version: 391d5e939eb1545d24c10533d7de424db8d9c191
+License: BSD
+License File: LICENSE
+
+Description:
+Library to compute FAST features with non-maximum suppression.
+
+The files are valid C and C++ code, and have no special requirements for
+compiling, and they do not depend on any libraries. Just compile them along with
+the rest of your project.
+
+To use the functions, #include "fast.h"
+
+The corner detectors have the following prototype (where X is 9, 10, 11 or 12):
+
+xy* fastX_detect_nonmax(const unsigned char * data, int xsize, int ysize, int stride, int threshold, int* numcorners)
+
+Where xy is the following simple struct typedef:
+
+typedef struct
+{
+ int x, y;
+} xy;
+
+The image is passed in as a block of data and dimensions, and the list of
+corners is returned as an array of xy structs, and an integer (numcorners)
+with the number of corners returned. The data can be deallocated with free().
+Nonmaximal suppression is performed on the corners. Note that the stride
+is the number of bytes between rows. If your image has no padding, then this
+is the same as xsize.
+
+The detection, scoring and nonmaximal suppression are available as individual
+functions. To see how to use the individual functions, see fast.c
+
+Local Modifications:
+Add lines to turn off clang formatting for these files
+Remove Fast 10, 11 and 12
+Convert tabs to spaces
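
[Note: the README above describes the detector API; a minimal caller might look like the sketch below. This is not part of the patch: the image buffer, dimensions, and threshold value 20 are placeholder assumptions, and only fast9_detect_nonmax, xy, and byte come from fast.h.]

#include <stdio.h>
#include <stdlib.h>
#include "fast.h"

int main(void)
{
  /* Placeholder 64x64 grayscale image with no row padding, so stride == xsize.
     A real caller would fill this buffer with pixel data. */
  int xsize = 64, ysize = 64, stride = 64;
  byte* im = (byte*)calloc((size_t)(stride * ysize), 1);
  int num_corners = 0;

  /* Threshold 20 is an arbitrary example value; higher thresholds
     yield fewer, stronger corners. */
  xy* corners = fast9_detect_nonmax(im, xsize, ysize, stride, 20, &num_corners);

  for (int i = 0; i < num_corners; ++i)
    printf("corner at (%d, %d)\n", corners[i].x, corners[i].y);

  /* Per the README, the returned array is deallocated with free(). */
  free(corners);
  free(im);
  return 0;
}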
diff --git a/media/libaom/src/third_party/fastfeat/fast.c b/media/libaom/src/third_party/fastfeat/fast.c
new file mode 100644
index 000000000..0d7efc154
--- /dev/null
+++ b/media/libaom/src/third_party/fastfeat/fast.c
@@ -0,0 +1,22 @@
+// clang-format off
+#include <stdlib.h>
+#include "fast.h"
+
+
+xy* fast9_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ xy* corners;
+ int num_corners;
+ int* scores;
+ xy* nonmax;
+
+ corners = fast9_detect(im, xsize, ysize, stride, b, &num_corners);
+ scores = fast9_score(im, stride, corners, num_corners, b);
+ nonmax = nonmax_suppression(corners, scores, num_corners, ret_num_corners);
+
+ free(corners);
+ free(scores);
+
+ return nonmax;
+}
+// clang-format on
diff --git a/media/libaom/src/third_party/fastfeat/fast.h b/media/libaom/src/third_party/fastfeat/fast.h
new file mode 100644
index 000000000..a00730e3d
--- /dev/null
+++ b/media/libaom/src/third_party/fastfeat/fast.h
@@ -0,0 +1,20 @@
+// clang-format off
+#ifndef FAST_H
+#define FAST_H
+
+typedef struct { int x, y; } xy;
+typedef unsigned char byte;
+
+int fast9_corner_score(const byte* p, const int pixel[], int bstart);
+
+xy* fast9_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+
+int* fast9_score(const byte* i, int stride, xy* corners, int num_corners, int b);
+
+xy* fast9_detect_nonmax(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners);
+
+xy* nonmax_suppression(const xy* corners, const int* scores, int num_corners, int* ret_num_nonmax);
+
+
+#endif
+// clang-format on
diff --git a/media/libaom/src/third_party/fastfeat/fast_9.c b/media/libaom/src/third_party/fastfeat/fast_9.c
new file mode 100644
index 000000000..ec167a953
--- /dev/null
+++ b/media/libaom/src/third_party/fastfeat/fast_9.c
@@ -0,0 +1,5911 @@
+// clang-format off
+/*This is mechanically generated code*/
+#include <stdlib.h>
+
+typedef struct { int x, y; } xy;
+typedef unsigned char byte;
+
+int fast9_corner_score(const byte* p, const int pixel[], int bstart)
+{
+ int bmin = bstart;
+ int bmax = 255;
+ int b = (bmax + bmin)/2;
+
+ /*Compute the score using binary search*/
+ for(;;)
+ {
+ int cb = *p + b;
+ int c_b= *p - b;
+
+
+ if( p[pixel[0]] > cb)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[15]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[6]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[0]] < c_b)
+ if( p[pixel[1]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[1]] < c_b)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[6]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[15]] < c_b)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[6]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[9]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[8]] > cb)
+ if( p[pixel[7]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[10]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[7]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[7]] > cb)
+ if( p[pixel[8]] > cb)
+ if( p[pixel[9]] > cb)
+ if( p[pixel[6]] > cb)
+ if( p[pixel[5]] > cb)
+ if( p[pixel[4]] > cb)
+ if( p[pixel[3]] > cb)
+ if( p[pixel[2]] > cb)
+ if( p[pixel[1]] > cb)
+ goto is_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] > cb)
+ if( p[pixel[11]] > cb)
+ if( p[pixel[12]] > cb)
+ if( p[pixel[13]] > cb)
+ if( p[pixel[14]] > cb)
+ if( p[pixel[15]] > cb)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else if( p[pixel[7]] < c_b)
+ if( p[pixel[8]] < c_b)
+ if( p[pixel[9]] < c_b)
+ if( p[pixel[6]] < c_b)
+ if( p[pixel[5]] < c_b)
+ if( p[pixel[4]] < c_b)
+ if( p[pixel[3]] < c_b)
+ if( p[pixel[2]] < c_b)
+ if( p[pixel[1]] < c_b)
+ goto is_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ if( p[pixel[10]] < c_b)
+ if( p[pixel[11]] < c_b)
+ if( p[pixel[12]] < c_b)
+ if( p[pixel[13]] < c_b)
+ if( p[pixel[14]] < c_b)
+ if( p[pixel[15]] < c_b)
+ goto is_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+ else
+ goto is_not_a_corner;
+
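+/* The generated tree above jumps to one of these labels: passing the segment
+   test raises the lower bound bmin, failing lowers the upper bound bmax, and
+   the enclosing loop bisects until it finds the largest threshold b at which
+   p still passes, which is returned as the corner's score. */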
+is_a_corner:
+ bmin=b;
+ goto end_if;
+
+is_not_a_corner:
+ bmax=b;
+ goto end_if;
+
+end_if:
+
+ if(bmin == bmax - 1 || bmin == bmax)
+ return bmin;
+ b = (bmin + bmax) / 2;
+ }
+}
+
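+/* Fill pixel[] with the address offsets of the 16 pixels forming a Bresenham
+   circle of radius 3 around the candidate, so the ring can be read as
+   p[pixel[0..15]] regardless of the image row stride. */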
+static void make_offsets(int pixel[], int row_stride)
+{
+ pixel[0] = 0 + row_stride * 3;
+ pixel[1] = 1 + row_stride * 3;
+ pixel[2] = 2 + row_stride * 2;
+ pixel[3] = 3 + row_stride * 1;
+ pixel[4] = 3 + row_stride * 0;
+ pixel[5] = 3 + row_stride * -1;
+ pixel[6] = 2 + row_stride * -2;
+ pixel[7] = 1 + row_stride * -3;
+ pixel[8] = 0 + row_stride * -3;
+ pixel[9] = -1 + row_stride * -3;
+ pixel[10] = -2 + row_stride * -2;
+ pixel[11] = -3 + row_stride * -1;
+ pixel[12] = -3 + row_stride * 0;
+ pixel[13] = -3 + row_stride * 1;
+ pixel[14] = -2 + row_stride * 2;
+ pixel[15] = -1 + row_stride * 3;
+}
+
+
+
+int* fast9_score(const byte* i, int stride, xy* corners, int num_corners, int b)
+{
+ int* scores = (int*)malloc(sizeof(int)* num_corners);
+ int n;
+
+ int pixel[16];
+ make_offsets(pixel, stride);
+
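+  /* Score each corner with the largest threshold at which the segment test
+     still succeeds (the binary search in fast9_corner_score above). */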
+ for(n=0; n < num_corners; n++)
+ scores[n] = fast9_corner_score(i + corners[n].y*stride + corners[n].x, pixel, b);
+
+ return scores;
+}
+
+
+xy* fast9_detect(const byte* im, int xsize, int ysize, int stride, int b, int* ret_num_corners)
+{
+ int num_corners=0;
+ xy* ret_corners;
+ int rsize=512;
+ int pixel[16];
+ int x, y;
+
+ ret_corners = (xy*)malloc(sizeof(xy)*rsize);
+ make_offsets(pixel, stride);
+
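+  /* Leave a 3-pixel border untouched on every side so that the whole ring of
+     offsets stays inside the image. */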
+ for(y=3; y < ysize - 3; y++)
+ for(x=3; x < xsize - 3; x++)
+ {
+ const byte* p = im + y*stride + x;
+
+ int cb = *p + b;
+ int c_b= *p - b;
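+      /* The generated decision tree below looks for 9 contiguous ring pixels
+         that are all brighter than cb or all darker than c_b; every path that
+         cannot complete such an arc hits a continue. */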
+ if(p[pixel[0]] > cb)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[15]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[6]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ {}
+ else
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ {}
+ else
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ {}
+ else
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ {}
+ else
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[0]] < c_b)
+ if(p[pixel[1]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[1]] < c_b)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[6]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[15]] < c_b)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[6]] > cb)
+ {}
+ else
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ {}
+ else
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ {}
+ else
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[9]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ {}
+ else
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[8]] > cb)
+ if(p[pixel[7]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[10]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ {}
+ else
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[7]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[7]] > cb)
+ if(p[pixel[8]] > cb)
+ if(p[pixel[9]] > cb)
+ if(p[pixel[6]] > cb)
+ if(p[pixel[5]] > cb)
+ if(p[pixel[4]] > cb)
+ if(p[pixel[3]] > cb)
+ if(p[pixel[2]] > cb)
+ if(p[pixel[1]] > cb)
+ {}
+ else
+ if(p[pixel[10]] > cb)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] > cb)
+ if(p[pixel[11]] > cb)
+ if(p[pixel[12]] > cb)
+ if(p[pixel[13]] > cb)
+ if(p[pixel[14]] > cb)
+ if(p[pixel[15]] > cb)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else if(p[pixel[7]] < c_b)
+ if(p[pixel[8]] < c_b)
+ if(p[pixel[9]] < c_b)
+ if(p[pixel[6]] < c_b)
+ if(p[pixel[5]] < c_b)
+ if(p[pixel[4]] < c_b)
+ if(p[pixel[3]] < c_b)
+ if(p[pixel[2]] < c_b)
+ if(p[pixel[1]] < c_b)
+ {}
+ else
+ if(p[pixel[10]] < c_b)
+ {}
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ if(p[pixel[10]] < c_b)
+ if(p[pixel[11]] < c_b)
+ if(p[pixel[12]] < c_b)
+ if(p[pixel[13]] < c_b)
+ if(p[pixel[14]] < c_b)
+ if(p[pixel[15]] < c_b)
+ {}
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ else
+ continue;
+ if(num_corners == rsize)
+ {
+ rsize*=2;
+ ret_corners = (xy*)realloc(ret_corners, sizeof(xy)*rsize);
+ }
+ ret_corners[num_corners].x = x;
+ ret_corners[num_corners].y = y;
+ num_corners++;
+
+ }
+
+ *ret_num_corners = num_corners;
+ return ret_corners;
+
+}
+
+// clang-format on
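
The wall of nested ifs that ends here is machine-generated: it is a
decision tree that evaluates the FAST-9 segment test using close to the
minimum number of pixel comparisons, rejecting non-corners (the common
case) as early as possible. For orientation, a naive equivalent of the
test the tree encodes is sketched below. The sketch is illustrative and
not part of this patch; in the generated file, cb is the center pixel's
value plus the detection threshold and c_b is its value minus it.

    /* A candidate pixel p is a corner if at least 9 contiguous pixels of
       the 16-pixel ring around it (offsets in pixel[]) are all brighter
       than cb, or all darker than c_b. */
    static int fast9_is_corner_naive(const unsigned char* p,
                                     const int pixel[16], int cb, int c_b)
    {
      int run_bright = 0, run_dark = 0;
      int i;
      for (i = 0; i < 16 + 9; i++) {  /* walk the ring with wrap-around */
        int v = p[pixel[i % 16]];
        run_bright = (v > cb) ? run_bright + 1 : 0;
        run_dark = (v < c_b) ? run_dark + 1 : 0;
        if (run_bright >= 9 || run_dark >= 9)
          return 1;
      }
      return 0;
    }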
diff --git a/media/libaom/src/third_party/fastfeat/nonmax.c b/media/libaom/src/third_party/fastfeat/nonmax.c
new file mode 100644
index 000000000..0438c4dc1
--- /dev/null
+++ b/media/libaom/src/third_party/fastfeat/nonmax.c
@@ -0,0 +1,121 @@
+// clang-format off
+#include <stdlib.h>
+#include "fast.h"
+
+
+#define Compare(X, Y) ((X)>=(Y))
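+/* Added note: because Compare uses >=, ties count. A corner is suppressed
+   even when a neighboring corner merely equals its score. */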
+
+xy* nonmax_suppression(const xy* corners, const int* scores, int num_corners, int* ret_num_nonmax)
+{
+ int num_nonmax=0;
+ int last_row;
+ int* row_start;
+ int i, j;
+ xy* ret_nonmax;
+ const int sz = (int)num_corners;
+
+  /* point_above points (roughly) to the pixel above the one of interest,
+     if there is a feature there. */
+ int point_above = 0;
+ int point_below = 0;
+
+
+ if(num_corners < 1)
+ {
+ *ret_num_nonmax = 0;
+ return 0;
+ }
+
+ ret_nonmax = (xy*)malloc(num_corners * sizeof(xy));
+
+ /* Find where each row begins
+ (the corners are output in raster scan order). A beginning of -1 signifies
+ that there are no corners on that row. */
+ last_row = corners[num_corners-1].y;
+ row_start = (int*)malloc((last_row+1)*sizeof(int));
+
+ for(i=0; i < last_row+1; i++)
+ row_start[i] = -1;
+
+ {
+ int prev_row = -1;
+ for(i=0; i< num_corners; i++)
+ if(corners[i].y != prev_row)
+ {
+ row_start[corners[i].y] = i;
+ prev_row = corners[i].y;
+ }
+ }
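+
+  /* Illustration (comment added for exposition): for corners on rows
+     {0,0,2,2,2}, last_row is 2 and row_start becomes {0,-1,2}; row 1
+     holds no corners, so its entry keeps the -1 sentinel. */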
+
+
+
+ for(i=0; i < sz; i++)
+ {
+ int score = scores[i];
+ xy pos = corners[i];
+
+ /*Check left */
+ if(i > 0)
+ if(corners[i-1].x == pos.x-1 && corners[i-1].y == pos.y && Compare(scores[i-1], score))
+ continue;
+
+ /*Check right*/
+ if(i < (sz - 1))
+ if(corners[i+1].x == pos.x+1 && corners[i+1].y == pos.y && Compare(scores[i+1], score))
+ continue;
+
+ /*Check above (if there is a valid row above)*/
+ if(pos.y > 0)
+ if (row_start[pos.y - 1] != -1)
+ {
+ /*Make sure that current point_above is one
+ row above.*/
+ if(corners[point_above].y < pos.y - 1)
+ point_above = row_start[pos.y-1];
+
+ /*Make point_above point to the first of the pixels above the current point,
+ if it exists.*/
+ for(; corners[point_above].y < pos.y && corners[point_above].x < pos.x - 1; point_above++)
+ {}
+
+
+ for(j=point_above; corners[j].y < pos.y && corners[j].x <= pos.x + 1; j++)
+ {
+ int x = corners[j].x;
+          if( (x == pos.x - 1 || x == pos.x || x == pos.x + 1) && Compare(scores[j], score))
+ goto cont;
+ }
+
+ }
+
+ /*Check below (if there is anything below)*/
+ if(pos.y >= 0)
+ if (pos.y != last_row && row_start[pos.y + 1] != -1 && point_below < sz) /*Nothing below*/
+ {
+ if(corners[point_below].y < pos.y + 1)
+ point_below = row_start[pos.y+1];
+
+      /* Make point_below point to one of the pixels below the current point,
+         if it exists. */
+ for(; point_below < sz && corners[point_below].y == pos.y+1 && corners[point_below].x < pos.x - 1; point_below++)
+ {}
+
+ for(j=point_below; j < sz && corners[j].y == pos.y+1 && corners[j].x <= pos.x + 1; j++)
+ {
+ int x = corners[j].x;
+        if( (x == pos.x - 1 || x == pos.x || x == pos.x + 1) && Compare(scores[j], score))
+ goto cont;
+ }
+ }
+
+ ret_nonmax[num_nonmax++] = corners[i];
+cont:
+ ;
+ }
+
+ free(row_start);
+ *ret_num_nonmax = num_nonmax;
+ return ret_nonmax;
+}
+
+// clang-format on
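
Taken together, fast_9.c produces the raw corner list and nonmax.c thins
it to local score maxima. A minimal driver sketch follows; it is
illustrative rather than part of this patch, and it assumes that fast.h
declares the usual fastfeat entry points fast9_detect and fast9_score
together with the xy and byte typedefs (only nonmax_suppression's
signature is confirmed by the hunk above).

    #include <stdio.h>
    #include <stdlib.h>
    #include "fast.h"

    int main(void)
    {
      /* A blank 64x64 image; a real caller would pass luma pixels. */
      int w = 64, h = 64, threshold = 20;
      byte* img = (byte*)calloc((size_t)(w * h), 1);
      int num_corners = 0, num_nonmax = 0;

      /* 1. Detect raw FAST-9 corners. They come out in raster-scan
            order, which is what nonmax_suppression() relies on. */
      xy* corners = fast9_detect(img, w, h, w, threshold, &num_corners);

      /* 2. Score each corner, then keep only the local maxima. */
      int* scores = fast9_score(img, w, corners, num_corners, threshold);
      xy* kept = nonmax_suppression(corners, scores, num_corners,
                                    &num_nonmax);

      printf("%d corners, %d after non-max suppression\n",
             num_corners, num_nonmax);
      free(corners);
      free(scores);
      free(kept);
      free(img);
      return 0;
    }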
diff --git a/media/libaom/src/third_party/googletest/README.libaom b/media/libaom/src/third_party/googletest/README.libaom
new file mode 100644
index 000000000..9784dd51b
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/README.libaom
@@ -0,0 +1,26 @@
+URL: https://github.com/google/googletest
+Version: 1.8.0
+License: BSD
+License File: LICENSE
+
+Description:
+Google's framework for writing C++ tests on a variety of platforms
+(Linux, Mac OS X, Windows, Windows CE, Symbian, etc). Based on the
+xUnit architecture. Supports automatic test discovery, a rich set of
+assertions, user-defined assertions, death tests, fatal and non-fatal
+failures, various options for running the tests, and XML test report
+generation.
+
+Local Modifications:
+- Remove everything but:
+ googletest-release-1.8.0/googletest/
+ cmake/
+ include/
+ src/
+ CHANGES
+  CMakeLists.txt
+ CONTRIBUTORS
+ LICENSE
+ README.md
+- Suppress unsigned overflow instrumentation in the LCG
+ https://github.com/google/googletest/pull/1066
diff --git a/media/libaom/src/third_party/googletest/gtest.mk b/media/libaom/src/third_party/googletest/gtest.mk
new file mode 100644
index 000000000..fc4dbdc24
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/gtest.mk
@@ -0,0 +1 @@
+GTEST_SRCS-yes += googletest/src/googletest/src/gtest-all.cc
diff --git a/media/libaom/src/third_party/googletest/src/googletest/CHANGES b/media/libaom/src/third_party/googletest/src/googletest/CHANGES
new file mode 100644
index 000000000..055213242
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/CHANGES
@@ -0,0 +1,157 @@
+Changes for 1.7.0:
+
+* New feature: death tests are supported on OpenBSD and in iOS
+ simulator now.
+* New feature: Google Test now implements a protocol to allow
+ a test runner to detect that a test program has exited
+  prematurely and report it as a failure (previously such a run would
+  be falsely reported as a success if its exit code was 0).
+* New feature: Test::RecordProperty() can now be used outside of the
+ lifespan of a test method, in which case it will be attributed to
+ the current test case or the test program in the XML report.
+* New feature (potentially breaking): --gtest_list_tests now prints
+ the type parameters and value parameters for each test.
+* Improvement: char pointers and char arrays are now escaped properly
+ in failure messages.
+* Improvement: failure summary in XML reports now includes file and
+ line information.
+* Improvement: the <testsuites> XML element now has a timestamp attribute.
+* Improvement: When --gtest_filter is specified, XML report now doesn't
+ contain information about tests that are filtered out.
+* Fixed the bug where long --gtest_filter flag values are truncated in
+ death tests.
+* Potentially breaking change: RUN_ALL_TESTS() is now implemented as a
+ function instead of a macro in order to work better with Clang.
+* Compatibility fixes with C++ 11 and various platforms.
+* Bug/warning fixes.
+
+Changes for 1.6.0:
+
+* New feature: ADD_FAILURE_AT() for reporting a test failure at the
+ given source location -- useful for writing testing utilities.
+* New feature: the universal value printer is moved from Google Mock
+ to Google Test.
+* New feature: type parameters and value parameters are reported in
+ the XML report now.
+* A gtest_disable_pthreads CMake option.
+* Colored output works in GNU Screen sessions now.
+* Parameters of value-parameterized tests are now printed in the
+ textual output.
+* Failures from ad hoc test assertions run before RUN_ALL_TESTS() are
+ now correctly reported.
+* Arguments of ASSERT_XY and EXPECT_XY no longer need to support << to
+ ostream.
+* More complete handling of exceptions.
+* GTEST_ASSERT_XY can be used instead of ASSERT_XY in case the latter
+ name is already used by another library.
+* --gtest_catch_exceptions is now true by default, allowing a test
+ program to continue after an exception is thrown.
+* Value-parameterized test fixtures can now derive from Test and
+ WithParamInterface<T> separately, easing conversion of legacy tests.
+* Death test messages are clearly marked to make them more
+ distinguishable from other messages.
+* Compatibility fixes for Android, Google Native Client, MinGW, HP UX,
+ PowerPC, Lucid autotools, libCStd, Sun C++, Borland C++ Builder (Code Gear),
+ IBM XL C++ (Visual Age C++), and C++0x.
+* Bug fixes and implementation clean-ups.
+* Potentially incompatible changes: disables the harmful 'make install'
+ command in autotools.
+
+Changes for 1.5.0:
+
+ * New feature: assertions can be safely called in multiple threads
+ where the pthreads library is available.
+ * New feature: predicates used inside EXPECT_TRUE() and friends
+ can now generate custom failure messages.
+ * New feature: Google Test can now be compiled as a DLL.
+ * New feature: fused source files are included.
+ * New feature: prints help when encountering unrecognized Google Test flags.
+ * Experimental feature: CMake build script (requires CMake 2.6.4+).
+ * Experimental feature: the Pump script for meta programming.
+ * double values streamed to an assertion are printed with enough precision
+ to differentiate any two different values.
+ * Google Test now works on Solaris and AIX.
+ * Build and test script improvements.
+ * Bug fixes and implementation clean-ups.
+
+ Potentially breaking changes:
+
+ * Stopped supporting VC++ 7.1 with exceptions disabled.
+ * Dropped support for 'make install'.
+
+Changes for 1.4.0:
+
+ * New feature: the event listener API
+ * New feature: test shuffling
+ * New feature: the XML report format is closer to junitreport and can
+ be parsed by Hudson now.
+ * New feature: when a test runs under Visual Studio, its failures are
+ integrated in the IDE.
+ * New feature: /MD(d) versions of VC++ projects.
+ * New feature: elapsed time for the tests is printed by default.
+ * New feature: comes with a TR1 tuple implementation such that Boost
+ is no longer needed for Combine().
+ * New feature: EXPECT_DEATH_IF_SUPPORTED macro and friends.
+ * New feature: the Xcode project can now produce static gtest
+ libraries in addition to a framework.
+  * Compatibility fixes for Solaris, Cygwin, MinGW, Windows Mobile,
+ Symbian, gcc, and C++Builder.
+ * Bug fixes and implementation clean-ups.
+
+Changes for 1.3.0:
+
+ * New feature: death tests on Windows, Cygwin, and Mac.
+ * New feature: ability to use Google Test assertions in other testing
+ frameworks.
+  * New feature: ability to run disabled tests via
+ --gtest_also_run_disabled_tests.
+ * New feature: the --help flag for printing the usage.
+ * New feature: access to Google Test flag values in user code.
+ * New feature: a script that packs Google Test into one .h and one
+ .cc file for easy deployment.
+ * New feature: support for distributing test functions to multiple
+ machines (requires support from the test runner).
+ * Bug fixes and implementation clean-ups.
+
+Changes for 1.2.1:
+
+ * Compatibility fixes for Linux IA-64 and IBM z/OS.
+ * Added support for using Boost and other TR1 implementations.
+ * Changes to the build scripts to support upcoming release of Google C++
+ Mocking Framework.
+ * Added Makefile to the distribution package.
+ * Improved build instructions in README.
+
+Changes for 1.2.0:
+
+ * New feature: value-parameterized tests.
+ * New feature: the ASSERT/EXPECT_(NON)FATAL_FAILURE(_ON_ALL_THREADS)
+ macros.
+ * Changed the XML report format to match JUnit/Ant's.
+ * Added tests to the Xcode project.
+ * Added scons/SConscript for building with SCons.
+ * Added src/gtest-all.cc for building Google Test from a single file.
+ * Fixed compatibility with Solaris and z/OS.
+ * Enabled running Python tests on systems with python 2.3 installed,
+ e.g. Mac OS X 10.4.
+ * Bug fixes.
+
+Changes for 1.1.0:
+
+ * New feature: type-parameterized tests.
+ * New feature: exception assertions.
+ * New feature: printing elapsed time of tests.
+ * Improved the robustness of death tests.
+ * Added an Xcode project and samples.
+ * Adjusted the output format on Windows to be understandable by Visual Studio.
+ * Minor bug fixes.
+
+Changes for 1.0.1:
+
+ * Added project files for Visual Studio 7.1.
+ * Fixed issues with compiling on Mac OS X.
+ * Fixed issues with compiling on Cygwin.
+
+Changes for 1.0.0:
+
+ * Initial Open Source release of Google Test
diff --git a/media/libaom/src/third_party/googletest/src/googletest/CMakeLists.txt b/media/libaom/src/third_party/googletest/src/googletest/CMakeLists.txt
new file mode 100644
index 000000000..621d0f042
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/CMakeLists.txt
@@ -0,0 +1,286 @@
+########################################################################
+# CMake build script for Google Test.
+#
+# To run the tests for Google Test itself on Linux, use 'make test' or
+# ctest. You can select which tests to run using 'ctest -R regex'.
+# For more options, run 'ctest --help'.
+
+# BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to
+# make it prominent in the GUI.
+option(BUILD_SHARED_LIBS "Build shared libraries (DLLs)." OFF)
+
+# When other libraries are using a shared version of runtime libraries,
+# Google Test also has to use one.
+option(
+ gtest_force_shared_crt
+ "Use shared (DLL) run-time lib even when Google Test is built as static lib."
+ OFF)
+
+option(gtest_build_tests "Build all of gtest's own tests." OFF)
+
+option(gtest_build_samples "Build gtest's sample programs." OFF)
+
+option(gtest_disable_pthreads "Disable uses of pthreads in gtest." OFF)
+
+option(
+ gtest_hide_internal_symbols
+ "Build gtest with internal symbols hidden in shared libraries."
+ OFF)
+
+# Defines pre_project_set_up_hermetic_build() and set_up_hermetic_build().
+include(cmake/hermetic_build.cmake OPTIONAL)
+
+if (COMMAND pre_project_set_up_hermetic_build)
+ pre_project_set_up_hermetic_build()
+endif()
+
+########################################################################
+#
+# Project-wide settings
+
+# Name of the project.
+#
+# CMake files in this project can refer to the root source directory
+# as ${gtest_SOURCE_DIR} and to the root binary directory as
+# ${gtest_BINARY_DIR}.
+# Language "C" is required for find_package(Threads).
+project(gtest CXX C)
+cmake_minimum_required(VERSION 2.6.2)
+
+if (COMMAND set_up_hermetic_build)
+ set_up_hermetic_build()
+endif()
+
+if (gtest_hide_internal_symbols)
+ set(CMAKE_CXX_VISIBILITY_PRESET hidden)
+ set(CMAKE_VISIBILITY_INLINES_HIDDEN 1)
+endif()
+
+# Define helper functions and macros used by Google Test.
+include(cmake/internal_utils.cmake)
+
+config_compiler_and_linker() # Defined in internal_utils.cmake.
+
+# Where Google Test's .h files can be found.
+include_directories(
+ ${gtest_SOURCE_DIR}/include
+ ${gtest_SOURCE_DIR})
+
+# Where Google Test's libraries can be found.
+link_directories(${gtest_BINARY_DIR}/src)
+
+# Summary of tuple support for Microsoft Visual Studio:
+# Compiler version(MS) version(cmake) Support
+# ---------- ----------- -------------- -----------------------------
+#  <= VS 2010  <= 10        <= 1600       Use Google Test's own tuple.
+# VS 2012 11 1700 std::tr1::tuple + _VARIADIC_MAX=10
+# VS 2013 12 1800 std::tr1::tuple
+if (MSVC AND MSVC_VERSION EQUAL 1700)
+ add_definitions(/D _VARIADIC_MAX=10)
+endif()
+
+########################################################################
+#
+# Defines the gtest & gtest_main libraries. User tests should link
+# with one of them.
+
+# Google Test libraries. We build them with stricter warnings than the
+# other targets, to ensure that gtest compiles cleanly even for users
+# who are aggressive about warnings.
+cxx_library(gtest "${cxx_strict}" src/gtest-all.cc)
+cxx_library(gtest_main "${cxx_strict}" src/gtest_main.cc)
+target_link_libraries(gtest_main gtest)
+
+# If the CMake version supports it, attach header directory information
+# to the targets for when we are part of a parent build (ie being pulled
+# in via add_subdirectory() rather than being a standalone build).
+if (DEFINED CMAKE_VERSION AND NOT "${CMAKE_VERSION}" VERSION_LESS "2.8.11")
+ target_include_directories(gtest INTERFACE "${gtest_SOURCE_DIR}/include")
+ target_include_directories(gtest_main INTERFACE "${gtest_SOURCE_DIR}/include")
+endif()
+
+########################################################################
+#
+# Install rules
+install(TARGETS gtest gtest_main
+ DESTINATION lib)
+install(DIRECTORY ${gtest_SOURCE_DIR}/include/gtest
+ DESTINATION include)
+
+########################################################################
+#
+# Samples on how to link user tests with gtest or gtest_main.
+#
+# They are not built by default. To build them, set the
+# gtest_build_samples option to ON. You can do it by running ccmake
+# or specifying the -Dgtest_build_samples=ON flag when running cmake.
+
+if (gtest_build_samples)
+ cxx_executable(sample1_unittest samples gtest_main samples/sample1.cc)
+ cxx_executable(sample2_unittest samples gtest_main samples/sample2.cc)
+ cxx_executable(sample3_unittest samples gtest_main)
+ cxx_executable(sample4_unittest samples gtest_main samples/sample4.cc)
+ cxx_executable(sample5_unittest samples gtest_main samples/sample1.cc)
+ cxx_executable(sample6_unittest samples gtest_main)
+ cxx_executable(sample7_unittest samples gtest_main)
+ cxx_executable(sample8_unittest samples gtest_main)
+ cxx_executable(sample9_unittest samples gtest)
+ cxx_executable(sample10_unittest samples gtest)
+endif()
+
+########################################################################
+#
+# Google Test's own tests.
+#
+# You can skip this section if you aren't interested in testing
+# Google Test itself.
+#
+# The tests are not built by default. To build them, set the
+# gtest_build_tests option to ON. You can do it by running ccmake
+# or specifying the -Dgtest_build_tests=ON flag when running cmake.
+
+if (gtest_build_tests)
+ # This must be set in the root directory for the tests to be run by
+ # 'make test' or ctest.
+ enable_testing()
+
+ ############################################################
+ # C++ tests built with standard compiler flags.
+
+ cxx_test(gtest-death-test_test gtest_main)
+ cxx_test(gtest_environment_test gtest)
+ cxx_test(gtest-filepath_test gtest_main)
+ cxx_test(gtest-linked_ptr_test gtest_main)
+ cxx_test(gtest-listener_test gtest_main)
+ cxx_test(gtest_main_unittest gtest_main)
+ cxx_test(gtest-message_test gtest_main)
+ cxx_test(gtest_no_test_unittest gtest)
+ cxx_test(gtest-options_test gtest_main)
+ cxx_test(gtest-param-test_test gtest
+ test/gtest-param-test2_test.cc)
+ cxx_test(gtest-port_test gtest_main)
+ cxx_test(gtest_pred_impl_unittest gtest_main)
+ cxx_test(gtest_premature_exit_test gtest
+ test/gtest_premature_exit_test.cc)
+ cxx_test(gtest-printers_test gtest_main)
+ cxx_test(gtest_prod_test gtest_main
+ test/production.cc)
+ cxx_test(gtest_repeat_test gtest)
+ cxx_test(gtest_sole_header_test gtest_main)
+ cxx_test(gtest_stress_test gtest)
+ cxx_test(gtest-test-part_test gtest_main)
+ cxx_test(gtest_throw_on_failure_ex_test gtest)
+ cxx_test(gtest-typed-test_test gtest_main
+ test/gtest-typed-test2_test.cc)
+ cxx_test(gtest_unittest gtest_main)
+ cxx_test(gtest-unittest-api_test gtest)
+
+ ############################################################
+ # C++ tests built with non-standard compiler flags.
+
+ # MSVC 7.1 does not support STL with exceptions disabled.
+ if (NOT MSVC OR MSVC_VERSION GREATER 1310)
+ cxx_library(gtest_no_exception "${cxx_no_exception}"
+ src/gtest-all.cc)
+ cxx_library(gtest_main_no_exception "${cxx_no_exception}"
+ src/gtest-all.cc src/gtest_main.cc)
+ endif()
+ cxx_library(gtest_main_no_rtti "${cxx_no_rtti}"
+ src/gtest-all.cc src/gtest_main.cc)
+
+ cxx_test_with_flags(gtest-death-test_ex_nocatch_test
+ "${cxx_exception} -DGTEST_ENABLE_CATCH_EXCEPTIONS_=0"
+ gtest test/gtest-death-test_ex_test.cc)
+ cxx_test_with_flags(gtest-death-test_ex_catch_test
+ "${cxx_exception} -DGTEST_ENABLE_CATCH_EXCEPTIONS_=1"
+ gtest test/gtest-death-test_ex_test.cc)
+
+ cxx_test_with_flags(gtest_no_rtti_unittest "${cxx_no_rtti}"
+ gtest_main_no_rtti test/gtest_unittest.cc)
+
+ cxx_shared_library(gtest_dll "${cxx_default}"
+ src/gtest-all.cc src/gtest_main.cc)
+
+ cxx_executable_with_flags(gtest_dll_test_ "${cxx_default}"
+ gtest_dll test/gtest_all_test.cc)
+ set_target_properties(gtest_dll_test_
+ PROPERTIES
+ COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1")
+
+ if (NOT MSVC OR MSVC_VERSION LESS 1600) # 1600 is Visual Studio 2010.
+ # Visual Studio 2010, 2012, and 2013 define symbols in std::tr1 that
+ # conflict with our own definitions. Therefore using our own tuple does not
+ # work on those compilers.
+ cxx_library(gtest_main_use_own_tuple "${cxx_use_own_tuple}"
+ src/gtest-all.cc src/gtest_main.cc)
+
+ cxx_test_with_flags(gtest-tuple_test "${cxx_use_own_tuple}"
+ gtest_main_use_own_tuple test/gtest-tuple_test.cc)
+
+ cxx_test_with_flags(gtest_use_own_tuple_test "${cxx_use_own_tuple}"
+ gtest_main_use_own_tuple
+ test/gtest-param-test_test.cc test/gtest-param-test2_test.cc)
+ endif()
+
+ ############################################################
+ # Python tests.
+
+ cxx_executable(gtest_break_on_failure_unittest_ test gtest)
+ py_test(gtest_break_on_failure_unittest)
+
+ # Visual Studio .NET 2003 does not support STL with exceptions disabled.
+ if (NOT MSVC OR MSVC_VERSION GREATER 1310) # 1310 is Visual Studio .NET 2003
+ cxx_executable_with_flags(
+ gtest_catch_exceptions_no_ex_test_
+ "${cxx_no_exception}"
+ gtest_main_no_exception
+ test/gtest_catch_exceptions_test_.cc)
+ endif()
+
+ cxx_executable_with_flags(
+ gtest_catch_exceptions_ex_test_
+ "${cxx_exception}"
+ gtest_main
+ test/gtest_catch_exceptions_test_.cc)
+ py_test(gtest_catch_exceptions_test)
+
+ cxx_executable(gtest_color_test_ test gtest)
+ py_test(gtest_color_test)
+
+ cxx_executable(gtest_env_var_test_ test gtest)
+ py_test(gtest_env_var_test)
+
+ cxx_executable(gtest_filter_unittest_ test gtest)
+ py_test(gtest_filter_unittest)
+
+ cxx_executable(gtest_help_test_ test gtest_main)
+ py_test(gtest_help_test)
+
+ cxx_executable(gtest_list_tests_unittest_ test gtest)
+ py_test(gtest_list_tests_unittest)
+
+ cxx_executable(gtest_output_test_ test gtest)
+ py_test(gtest_output_test)
+
+ cxx_executable(gtest_shuffle_test_ test gtest)
+ py_test(gtest_shuffle_test)
+
+ # MSVC 7.1 does not support STL with exceptions disabled.
+ if (NOT MSVC OR MSVC_VERSION GREATER 1310)
+ cxx_executable(gtest_throw_on_failure_test_ test gtest_no_exception)
+ set_target_properties(gtest_throw_on_failure_test_
+ PROPERTIES
+ COMPILE_FLAGS "${cxx_no_exception}")
+ py_test(gtest_throw_on_failure_test)
+ endif()
+
+ cxx_executable(gtest_uninitialized_test_ test gtest)
+ py_test(gtest_uninitialized_test)
+
+ cxx_executable(gtest_xml_outfile1_test_ test gtest_main)
+ cxx_executable(gtest_xml_outfile2_test_ test gtest_main)
+ py_test(gtest_xml_outfiles_test)
+
+ cxx_executable(gtest_xml_output_unittest_ test gtest)
+ py_test(gtest_xml_output_unittest)
+endif()
diff --git a/media/libaom/src/third_party/googletest/src/googletest/CONTRIBUTORS b/media/libaom/src/third_party/googletest/src/googletest/CONTRIBUTORS
new file mode 100644
index 000000000..feae2fc04
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/CONTRIBUTORS
@@ -0,0 +1,37 @@
+# This file contains a list of people who've made non-trivial
+# contributions to the Google C++ Testing Framework project. People
+# who commit code to the project are encouraged to add their names
+# here. Please keep the list sorted by first names.
+
+Ajay Joshi <jaj@google.com>
+Balázs Dán <balazs.dan@gmail.com>
+Bharat Mediratta <bharat@menalto.com>
+Chandler Carruth <chandlerc@google.com>
+Chris Prince <cprince@google.com>
+Chris Taylor <taylorc@google.com>
+Dan Egnor <egnor@google.com>
+Eric Roman <eroman@chromium.org>
+Hady Zalek <hady.zalek@gmail.com>
+Jeffrey Yasskin <jyasskin@google.com>
+Jói Sigurðsson <joi@google.com>
+Keir Mierle <mierle@gmail.com>
+Keith Ray <keith.ray@gmail.com>
+Kenton Varda <kenton@google.com>
+Manuel Klimek <klimek@google.com>
+Markus Heule <markus.heule@gmail.com>
+Mika Raento <mikie@iki.fi>
+Miklós Fazekas <mfazekas@szemafor.com>
+Pasi Valminen <pasi.valminen@gmail.com>
+Patrick Hanna <phanna@google.com>
+Patrick Riley <pfr@google.com>
+Peter Kaminski <piotrk@google.com>
+Preston Jackson <preston.a.jackson@gmail.com>
+Rainer Klaffenboeck <rainer.klaffenboeck@dynatrace.com>
+Russ Cox <rsc@google.com>
+Russ Rufer <russ@pentad.com>
+Sean Mcafee <eefacm@gmail.com>
+Sigurður Ásgeirsson <siggi@google.com>
+Tracy Bialik <tracy@pentad.com>
+Vadim Berman <vadimb@google.com>
+Vlad Losev <vladl@google.com>
+Zhanyong Wan <wan@google.com>
diff --git a/media/libaom/src/third_party/googletest/src/googletest/LICENSE b/media/libaom/src/third_party/googletest/src/googletest/LICENSE
new file mode 100644
index 000000000..1941a11f8
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/LICENSE
@@ -0,0 +1,28 @@
+Copyright 2008, Google Inc.
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/media/libaom/src/third_party/googletest/src/googletest/README.md b/media/libaom/src/third_party/googletest/src/googletest/README.md
new file mode 100644
index 000000000..edd440805
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/README.md
@@ -0,0 +1,280 @@
+
+### Generic Build Instructions ###
+
+#### Setup ####
+
+To build Google Test and your tests that use it, you need to tell your
+build system where to find its headers and source files. The exact
+way to do it depends on which build system you use, and is usually
+straightforward.
+
+#### Build ####
+
+Suppose you put Google Test in directory `${GTEST_DIR}`. To build it,
+create a library build target (or a project, in Visual Studio and
+Xcode terms) to compile
+
+ ${GTEST_DIR}/src/gtest-all.cc
+
+with `${GTEST_DIR}/include` in the system header search path and `${GTEST_DIR}`
+in the normal header search path. Assuming a Linux-like system and gcc,
+something like the following will do:
+
+ g++ -isystem ${GTEST_DIR}/include -I${GTEST_DIR} \
+ -pthread -c ${GTEST_DIR}/src/gtest-all.cc
+ ar -rv libgtest.a gtest-all.o
+
+(We need `-pthread` as Google Test uses threads.)
+
+Next, you should compile your test source file with
+`${GTEST_DIR}/include` in the system header search path, and link it
+with gtest and any other necessary libraries:
+
+ g++ -isystem ${GTEST_DIR}/include -pthread path/to/your_test.cc libgtest.a \
+ -o your_test
+
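+For illustration, a minimal test source might look like the following
+(`Factorial` is a hypothetical stand-in for your own code under test):
+
+    #include "gtest/gtest.h"
+
+    // Code under test, inlined only to keep the example self-contained.
+    static int Factorial(int n) { return n <= 1 ? 1 : n * Factorial(n - 1); }
+
+    TEST(FactorialTest, HandlesZeroAndPositiveInput) {
+      EXPECT_EQ(1, Factorial(0));
+      EXPECT_EQ(6, Factorial(3));
+    }
+
+    // Needed because the command above links libgtest.a only; linking
+    // against gtest_main instead would supply an equivalent main().
+    int main(int argc, char **argv) {
+      ::testing::InitGoogleTest(&argc, argv);
+      return RUN_ALL_TESTS();
+    }
+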
+As an example, the make/ directory contains a Makefile that you can
+use to build Google Test on systems where GNU make is available
+(e.g. Linux, Mac OS X, and Cygwin). It doesn't try to build Google
+Test's own tests. Instead, it just builds the Google Test library and
+a sample test. You can use it as a starting point for your own build
+script.
+
+If the default settings are correct for your environment, the
+following commands should succeed:
+
+ cd ${GTEST_DIR}/make
+ make
+ ./sample1_unittest
+
+If you see errors, try to tweak the contents of `make/Makefile` to make
+them go away. There are instructions in `make/Makefile` on how to do
+it.
+
+### Using CMake ###
+
+Google Test comes with a CMake build script
+([CMakeLists.txt](CMakeLists.txt)) that can be used on a wide range of
+platforms (the "C" in CMake stands for cross-platform). If you don't
+have CMake installed already, you can
+download it for free from <http://www.cmake.org/>.
+
+CMake works by generating native makefiles or build projects that can
+be used in the compiler environment of your choice. The typical
+workflow starts with:
+
+ mkdir mybuild # Create a directory to hold the build output.
+ cd mybuild
+ cmake ${GTEST_DIR} # Generate native build scripts.
+
+If you want to build Google Test's samples, you should replace the
+last command with
+
+ cmake -Dgtest_build_samples=ON ${GTEST_DIR}
+
+If you are on a \*nix system, you should now see a Makefile in the
+current directory. Just type 'make' to build gtest.
+
+If you use Windows and have Visual Studio installed, a `gtest.sln` file
+and several `.vcproj` files will be created. You can then build them
+using Visual Studio.
+
+On Mac OS X with Xcode installed, a `.xcodeproj` file will be generated.
+
+### Legacy Build Scripts ###
+
+Before settling on CMake, we had been providing hand-maintained build
+projects/scripts for Visual Studio, Xcode, and Autotools. While we
+continue to provide them for convenience, they are not actively
+maintained any more. We highly recommend that you follow the
+instructions in the previous two sections to integrate Google Test
+with your existing build system.
+
+If you still need to use the legacy build scripts, here's how:
+
+The msvc\ folder contains two solutions with Visual C++ projects.
+Open the `gtest.sln` or `gtest-md.sln` file using Visual Studio, and you
+are ready to build Google Test the same way you build any Visual
+Studio project. Files that have names ending with -md use DLL
+versions of Microsoft runtime libraries (the /MD or the /MDd compiler
+option). Files without that suffix use static versions of the runtime
+libraries (the /MT or the /MTd option). Please note that one must use
+the same option to compile both gtest and the test code. If you use
+Visual Studio 2005 or above, we recommend the -md version as /MD is
+the default for new projects in these versions of Visual Studio.
+
+On Mac OS X, open the `gtest.xcodeproj` in the `xcode/` folder using
+Xcode. Build the "gtest" target. The universal binary framework will
+end up in your selected build directory (selected in the Xcode
+"Preferences..." -> "Building" pane and defaults to xcode/build).
+Alternatively, at the command line, enter:
+
+ xcodebuild
+
+This will build the "Release" configuration of gtest.framework in your
+default build location. See the "xcodebuild" man page for more
+information about building different configurations and building in
+different locations.
+
+If you wish to use the Google Test Xcode project with Xcode 4.x and
+above, you need to either:
+
+ * update the SDK configuration options in xcode/Config/General.xconfig.
+   Comment out the `SDKROOT`, `MACOS_DEPLOYMENT_TARGET`, and `GCC_VERSION`
+   options. If you choose this route, you lose the ability to target
+   earlier versions of Mac OS X.
+ * Install an SDK for an earlier version. This doesn't appear to be
+ supported by Apple, but has been reported to work
+ (http://stackoverflow.com/questions/5378518).
+
+### Tweaking Google Test ###
+
+Google Test can be used in diverse environments. The default
+configuration may not work (or may not work well) out of the box in
+some environments. However, you can easily tweak Google Test by
+defining control macros on the compiler command line. Generally,
+these macros are named like `GTEST_XYZ` and you define them to either 1
+or 0 to enable or disable a certain feature.
+
+We list the most frequently used macros below. For a complete list,
+see file [include/gtest/internal/gtest-port.h](include/gtest/internal/gtest-port.h).
+
+### Choosing a TR1 Tuple Library ###
+
+Some Google Test features require the C++ Technical Report 1 (TR1)
+tuple library, which is not yet available with all compilers. The
+good news is that Google Test implements a subset of TR1 tuple that's
+enough for its own need, and will automatically use this when the
+compiler doesn't provide TR1 tuple.
+
+Usually you don't need to care about which tuple library Google Test
+uses. However, if your project already uses TR1 tuple, you need to
+tell Google Test to use the same TR1 tuple library the rest of your
+project uses, or the two tuple implementations will clash. To do
+that, add
+
+ -DGTEST_USE_OWN_TR1_TUPLE=0
+
+to the compiler flags while compiling Google Test and your tests. If
+you want to force Google Test to use its own tuple library, just add
+
+ -DGTEST_USE_OWN_TR1_TUPLE=1
+
+to the compiler flags instead.
+
+If you don't want Google Test to use tuple at all, add
+
+ -DGTEST_HAS_TR1_TUPLE=0
+
+and all features using tuple will be disabled.
+
+### Multi-threaded Tests ###
+
+Google Test is thread-safe where the pthread library is available.
+After `#include "gtest/gtest.h"`, you can check the `GTEST_IS_THREADSAFE`
+macro to see whether this is the case (yes if the macro is `#defined` to
+1, no if it's undefined).
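+
+For instance (an illustrative snippet; `GTEST_IS_THREADSAFE` is the only
+Google Test symbol it relies on):
+
+    #include "gtest/gtest.h"
+
+    TEST(PortabilityTest, ReportsThreadSafety) {
+    #if GTEST_IS_THREADSAFE
+      SUCCEED() << "pthread found: assertions are thread-safe.";
+    #else
+      SUCCEED() << "no pthread: only assert from the main thread.";
+    #endif
+    }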
+
+If Google Test doesn't correctly detect whether pthread is available
+in your environment, you can force it with
+
+ -DGTEST_HAS_PTHREAD=1
+
+or
+
+ -DGTEST_HAS_PTHREAD=0
+
+When Google Test uses pthread, you may need to add flags to your
+compiler and/or linker to select the pthread library, or you'll get
+link errors. If you use the CMake script or the deprecated Autotools
+script, this is taken care of for you. If you use your own build
+script, you'll need to read your compiler and linker's manual to
+figure out what flags to add.
+
+### As a Shared Library (DLL) ###
+
+Google Test is compact, so most users can build and link it as a
+static library for simplicity. You can choose to use Google Test
+as a shared library (known as a DLL on Windows) if you prefer.
+
+To compile *gtest* as a shared library, add
+
+ -DGTEST_CREATE_SHARED_LIBRARY=1
+
+to the compiler flags. You'll also need to tell the linker to produce
+a shared library instead; consult your linker's manual for how to do
+it.
+
+To compile your *tests* that use the gtest shared library, add
+
+ -DGTEST_LINKED_AS_SHARED_LIBRARY=1
+
+to the compiler flags.
+
+Note: while the above steps aren't technically necessary today when
+using some compilers (e.g. GCC), they may become necessary in the
+future, if we decide to improve the speed of loading the library (see
+<http://gcc.gnu.org/wiki/Visibility> for details). Therefore you are
+recommended to always add the above flags when using Google Test as a
+shared library. Otherwise a future release of Google Test may break
+your build script.
+
+### Avoiding Macro Name Clashes ###
+
+In C++, macros don't obey namespaces. Therefore two libraries that
+both define a macro of the same name will clash if you `#include` both
+definitions. In case a Google Test macro clashes with another
+library, you can force Google Test to rename its macro to avoid the
+conflict.
+
+Specifically, if both Google Test and some other code define macro
+FOO, you can add
+
+ -DGTEST_DONT_DEFINE_FOO=1
+
+to the compiler flags to tell Google Test to change the macro's name
+from `FOO` to `GTEST_FOO`. Currently `FOO` can be `FAIL`, `SUCCEED`,
+or `TEST`. For example, with `-DGTEST_DONT_DEFINE_TEST=1`, you'll
+need to write
+
+ GTEST_TEST(SomeTest, DoesThis) { ... }
+
+instead of
+
+ TEST(SomeTest, DoesThis) { ... }
+
+in order to define a test.
+
+## Developing Google Test ##
+
+This section discusses how to make your own changes to Google Test.
+
+### Testing Google Test Itself ###
+
+To make sure your changes work as intended and don't break existing
+functionality, you'll want to compile and run Google Test's own tests.
+For that you can use CMake:
+
+ mkdir mybuild
+ cd mybuild
+ cmake -Dgtest_build_tests=ON ${GTEST_DIR}
+
+Make sure you have Python installed, as some of Google Test's tests
+are written in Python. If the cmake command complains about not being
+able to find Python (`Could NOT find PythonInterp (missing:
+PYTHON_EXECUTABLE)`), try telling it explicitly where your Python
+executable can be found:
+
+ cmake -DPYTHON_EXECUTABLE=path/to/python -Dgtest_build_tests=ON ${GTEST_DIR}
+
+Next, you can build Google Test and all of its own tests. On \*nix,
+this is usually done by 'make'. To run the tests, do
+
+ make test
+
+All tests should pass.
+
+Normally you don't need to worry about regenerating the source files,
+unless you need to modify them. In that case, you should modify the
+corresponding .pump files instead and run the pump.py Python script to
+regenerate them. You can find pump.py in the [scripts/](scripts/) directory.
+Read the [Pump manual](docs/PumpManual.md) for how to use it.
diff --git a/media/libaom/src/third_party/googletest/src/googletest/cmake/internal_utils.cmake b/media/libaom/src/third_party/googletest/src/googletest/cmake/internal_utils.cmake
new file mode 100644
index 000000000..777b91ed4
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/cmake/internal_utils.cmake
@@ -0,0 +1,254 @@
+# Defines functions and macros useful for building Google Test and
+# Google Mock.
+#
+# Note:
+#
+# - This file will be run twice when building Google Mock (once via
+# Google Test's CMakeLists.txt, and once via Google Mock's).
+# Therefore it shouldn't have any side effects other than defining
+# the functions and macros.
+#
+# - The functions/macros defined in this file may depend on Google
+# Test and Google Mock's option() definitions, and thus must be
+# called *after* the options have been defined.
+
+# Tweaks CMake's default compiler/linker settings to suit Google Test's needs.
+#
+# This must be a macro(), as inside a function string() can only
+# update variables in the function scope.
+macro(fix_default_compiler_settings_)
+ if (MSVC)
+ # For MSVC, CMake sets certain flags to defaults we want to override.
+ # This replacement code is taken from sample in the CMake Wiki at
+ # http://www.cmake.org/Wiki/CMake_FAQ#Dynamic_Replace.
+ foreach (flag_var
+ CMAKE_CXX_FLAGS CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELEASE
+ CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
+ if (NOT BUILD_SHARED_LIBS AND NOT gtest_force_shared_crt)
+ # When Google Test is built as a shared library, it should also use
+ # shared runtime libraries. Otherwise, it may end up with multiple
+ # copies of runtime library data in different modules, resulting in
+ # hard-to-find crashes. When it is built as a static library, it is
+ # preferable to use CRT as static libraries, as we don't have to rely
+ # on CRT DLLs being available. CMake always defaults to using shared
+ # CRT libraries, so we override that default here.
+ string(REPLACE "/MD" "-MT" ${flag_var} "${${flag_var}}")
+ endif()
+
+ # We prefer more strict warning checking for building Google Test.
+ # Replaces /W3 with /W4 in defaults.
+ string(REPLACE "/W3" "/W4" ${flag_var} "${${flag_var}}")
+ endforeach()
+ endif()
+endmacro()
+
+# Defines the compiler/linker flags used to build Google Test and
+# Google Mock. You can tweak these definitions to suit your need. A
+# variable's value is empty before it's explicitly assigned to.
+macro(config_compiler_and_linker)
+ if (NOT gtest_disable_pthreads)
+ # Defines CMAKE_USE_PTHREADS_INIT and CMAKE_THREAD_LIBS_INIT.
+ find_package(Threads)
+ endif()
+
+ fix_default_compiler_settings_()
+ if (MSVC)
+ # Newlines inside flags variables break CMake's NMake generator.
+ # TODO(vladl@google.com): Add -RTCs and -RTCu to debug builds.
+ set(cxx_base_flags "-GS -W4 -WX -wd4251 -wd4275 -nologo -J -Zi")
+ if (MSVC_VERSION LESS 1400) # 1400 is Visual Studio 2005
+ # Suppress spurious warnings MSVC 7.1 sometimes issues.
+ # Forcing value to bool.
+ set(cxx_base_flags "${cxx_base_flags} -wd4800")
+ # Copy constructor and assignment operator could not be generated.
+ set(cxx_base_flags "${cxx_base_flags} -wd4511 -wd4512")
+ # Compatibility warnings not applicable to Google Test.
+ # Resolved overload was found by argument-dependent lookup.
+ set(cxx_base_flags "${cxx_base_flags} -wd4675")
+ endif()
+ if (MSVC_VERSION LESS 1500) # 1500 is Visual Studio 2008
+ # Conditional expression is constant.
+ # When compiling with /W4, we get several instances of C4127
+ # (Conditional expression is constant). In our code, we disable that
+ # warning on a case-by-case basis. However, on Visual Studio 2005,
+ # the warning fires on std::list. Therefore on that compiler and earlier,
+ # we disable the warning project-wide.
+ set(cxx_base_flags "${cxx_base_flags} -wd4127")
+ endif()
+ if (NOT (MSVC_VERSION LESS 1700)) # 1700 is Visual Studio 2012.
+ # Suppress "unreachable code" warning on VS 2012 and later.
+ # http://stackoverflow.com/questions/3232669 explains the issue.
+ set(cxx_base_flags "${cxx_base_flags} -wd4702")
+ endif()
+ if (NOT (MSVC_VERSION GREATER 1900)) # 1900 is Visual Studio 2015
+ # BigObj required for tests.
+ set(cxx_base_flags "${cxx_base_flags} -bigobj")
+ endif()
+
+ set(cxx_base_flags "${cxx_base_flags} -D_UNICODE -DUNICODE -DWIN32 -D_WIN32")
+ set(cxx_base_flags "${cxx_base_flags} -DSTRICT -DWIN32_LEAN_AND_MEAN")
+ set(cxx_exception_flags "-EHsc -D_HAS_EXCEPTIONS=1")
+ set(cxx_no_exception_flags "-D_HAS_EXCEPTIONS=0")
+ set(cxx_no_rtti_flags "-GR-")
+ elseif (CMAKE_COMPILER_IS_GNUCXX)
+ set(cxx_base_flags "-Wall -Wshadow")
+ set(cxx_exception_flags "-fexceptions")
+ set(cxx_no_exception_flags "-fno-exceptions")
+ # Until version 4.3.2, GCC doesn't define a macro to indicate
+ # whether RTTI is enabled. Therefore we define GTEST_HAS_RTTI
+ # explicitly.
+ set(cxx_no_rtti_flags "-fno-rtti -DGTEST_HAS_RTTI=0")
+ set(cxx_strict_flags
+ "-Wextra -Wno-unused-parameter -Wno-missing-field-initializers")
+ elseif (CMAKE_CXX_COMPILER_ID STREQUAL "SunPro")
+ set(cxx_exception_flags "-features=except")
+ # Sun Pro doesn't provide macros to indicate whether exceptions and
+ # RTTI are enabled, so we define GTEST_HAS_* explicitly.
+ set(cxx_no_exception_flags "-features=no%except -DGTEST_HAS_EXCEPTIONS=0")
+ set(cxx_no_rtti_flags "-features=no%rtti -DGTEST_HAS_RTTI=0")
+ elseif (CMAKE_CXX_COMPILER_ID STREQUAL "VisualAge" OR
+ CMAKE_CXX_COMPILER_ID STREQUAL "XL")
+ # CMake 2.8 changes Visual Age's compiler ID to "XL".
+ set(cxx_exception_flags "-qeh")
+ set(cxx_no_exception_flags "-qnoeh")
+ # Until version 9.0, Visual Age doesn't define a macro to indicate
+ # whether RTTI is enabled. Therefore we define GTEST_HAS_RTTI
+ # explicitly.
+ set(cxx_no_rtti_flags "-qnortti -DGTEST_HAS_RTTI=0")
+ elseif (CMAKE_CXX_COMPILER_ID STREQUAL "HP")
+ set(cxx_base_flags "-AA -mt")
+ set(cxx_exception_flags "-DGTEST_HAS_EXCEPTIONS=1")
+ set(cxx_no_exception_flags "+noeh -DGTEST_HAS_EXCEPTIONS=0")
+    # RTTI cannot be disabled in the HP aCC compiler.
+ set(cxx_no_rtti_flags "")
+ endif()
+
+ if (CMAKE_USE_PTHREADS_INIT) # The pthreads library is available and allowed.
+ set(cxx_base_flags "${cxx_base_flags} -DGTEST_HAS_PTHREAD=1")
+ else()
+ set(cxx_base_flags "${cxx_base_flags} -DGTEST_HAS_PTHREAD=0")
+ endif()
+
+ # For building gtest's own tests and samples.
+ set(cxx_exception "${CMAKE_CXX_FLAGS} ${cxx_base_flags} ${cxx_exception_flags}")
+ set(cxx_no_exception
+ "${CMAKE_CXX_FLAGS} ${cxx_base_flags} ${cxx_no_exception_flags}")
+ set(cxx_default "${cxx_exception}")
+ set(cxx_no_rtti "${cxx_default} ${cxx_no_rtti_flags}")
+ set(cxx_use_own_tuple "${cxx_default} -DGTEST_USE_OWN_TR1_TUPLE=1")
+
+ # For building the gtest libraries.
+ set(cxx_strict "${cxx_default} ${cxx_strict_flags}")
+endmacro()
+
+# Defines the gtest & gtest_main libraries. User tests should link
+# with one of them.
+function(cxx_library_with_type name type cxx_flags)
+ # type can be either STATIC or SHARED to denote a static or shared library.
+ # ARGN refers to additional arguments after 'cxx_flags'.
+ add_library(${name} ${type} ${ARGN})
+ set_target_properties(${name}
+ PROPERTIES
+ COMPILE_FLAGS "${cxx_flags}")
+ if (BUILD_SHARED_LIBS OR type STREQUAL "SHARED")
+ set_target_properties(${name}
+ PROPERTIES
+ COMPILE_DEFINITIONS "GTEST_CREATE_SHARED_LIBRARY=1")
+ endif()
+ if (CMAKE_USE_PTHREADS_INIT)
+ target_link_libraries(${name} ${CMAKE_THREAD_LIBS_INIT})
+ endif()
+endfunction()
+
+########################################################################
+#
+# Helper functions for creating build targets.
+
+function(cxx_shared_library name cxx_flags)
+ cxx_library_with_type(${name} SHARED "${cxx_flags}" ${ARGN})
+endfunction()
+
+function(cxx_library name cxx_flags)
+ cxx_library_with_type(${name} "" "${cxx_flags}" ${ARGN})
+endfunction()
+
+# cxx_executable_with_flags(name cxx_flags libs srcs...)
+#
+# creates a named C++ executable that depends on the given libraries and
+# is built from the given source files with the given compiler flags.
+function(cxx_executable_with_flags name cxx_flags libs)
+ add_executable(${name} ${ARGN})
+ if (cxx_flags)
+ set_target_properties(${name}
+ PROPERTIES
+ COMPILE_FLAGS "${cxx_flags}")
+ endif()
+ if (BUILD_SHARED_LIBS)
+ set_target_properties(${name}
+ PROPERTIES
+ COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1")
+ endif()
+ # To support mixing linking in static and dynamic libraries, link each
+ # library in with an extra call to target_link_libraries.
+ foreach (lib "${libs}")
+ target_link_libraries(${name} ${lib})
+ endforeach()
+endfunction()
+
+# cxx_executable(name dir lib srcs...)
+#
+# creates a named target that depends on the given libs and is built
+# from the given source files. dir/name.cc is implicitly included in
+# the source file list.
+function(cxx_executable name dir libs)
+ cxx_executable_with_flags(
+ ${name} "${cxx_default}" "${libs}" "${dir}/${name}.cc" ${ARGN})
+endfunction()
+
+# Sets PYTHONINTERP_FOUND and PYTHON_EXECUTABLE.
+find_package(PythonInterp)
+
+# cxx_test_with_flags(name cxx_flags libs srcs...)
+#
+# creates a named C++ test that depends on the given libs and is built
+# from the given source files with the given compiler flags.
+function(cxx_test_with_flags name cxx_flags libs)
+ cxx_executable_with_flags(${name} "${cxx_flags}" "${libs}" ${ARGN})
+ add_test(${name} ${name})
+endfunction()
+
+# cxx_test(name libs srcs...)
+#
+# creates a named test target that depends on the given libs and is
+# built from the given source files. Unlike cxx_test_with_flags,
+# test/name.cc is already implicitly included in the source file list.
+function(cxx_test name libs)
+ cxx_test_with_flags("${name}" "${cxx_default}" "${libs}"
+ "test/${name}.cc" ${ARGN})
+endfunction()
+
+# py_test(name)
+#
+# creates a Python test with the given name whose main module is in
+# test/name.py. It does nothing if Python is not installed.
+function(py_test name)
+ # We are not supporting Python tests on Linux yet as they consider
+ # all Linux environments to be google3 and try to use google3 features.
+ if (PYTHONINTERP_FOUND)
+ # ${CMAKE_BINARY_DIR} is known at configuration time, so we can
+ # directly bind it from cmake. ${CTEST_CONFIGURATION_TYPE} is known
+ # only at ctest runtime (by calling ctest -c <Configuration>), so
+ # we have to escape $ to delay variable substitution here.
+ if (${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} GREATER 3.1)
+ add_test(
+ NAME ${name}
+ COMMAND ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test/${name}.py
+ --build_dir=${CMAKE_CURRENT_BINARY_DIR}/$<CONFIGURATION>)
+ else (${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} GREATER 3.1)
+ add_test(
+ ${name}
+ ${PYTHON_EXECUTABLE} ${CMAKE_CURRENT_SOURCE_DIR}/test/${name}.py
+ --build_dir=${CMAKE_CURRENT_BINARY_DIR}/\${CTEST_CONFIGURATION_TYPE})
+ endif (${CMAKE_MAJOR_VERSION}.${CMAKE_MINOR_VERSION} GREATER 3.1)
+ endif()
+endfunction()
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-death-test.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-death-test.h
new file mode 100644
index 000000000..957a69c6a
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-death-test.h
@@ -0,0 +1,294 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for death tests. It is
+// #included by gtest.h so a user doesn't need to include this
+// directly.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+
+#include "gtest/internal/gtest-death-test-internal.h"
+
+namespace testing {
+
+// This flag controls the style of death tests. Valid values are "threadsafe",
+// meaning that the death test child process will re-execute the test binary
+// from the start, running only a single death test, or "fast",
+// meaning that the child process will execute the test logic immediately
+// after forking.
+GTEST_DECLARE_string_(death_test_style);
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process. Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests. IMPORTANT: This is an internal utility. Using it may break the
+// implementation of death tests. User code MUST NOT use it.
+GTEST_API_ bool InDeathTestChild();
+
+} // namespace internal
+
+// The following macros are useful for writing death tests.
+
+// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
+// executed:
+//
+// 1. It generates a warning if there is more than one active
+// thread. This is because it's safe to fork() or clone() only
+// when there is a single thread.
+//
+// 2. The parent process clone()s a sub-process and runs the death
+// test in it; the sub-process exits with code 0 at the end of the
+// death test, if it hasn't exited already.
+//
+// 3. The parent process waits for the sub-process to terminate.
+//
+// 4. The parent process checks the exit code and error message of
+// the sub-process.
+//
+// Examples:
+//
+// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
+// for (int i = 0; i < 5; i++) {
+// EXPECT_DEATH(server.ProcessRequest(i),
+// "Invalid request .* in ProcessRequest()")
+// << "Failed to die on request " << i;
+// }
+//
+// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
+//
+// bool KilledBySIGHUP(int exit_code) {
+// return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
+// }
+//
+// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
+//
+// On the regular expressions used in death tests:
+//
+// On POSIX-compliant systems (*nix), we use the <regex.h> library,
+// which uses the POSIX extended regex syntax.
+//
+// On other platforms (e.g. Windows), we only support a simple regex
+// syntax implemented as part of Google Test. This limited
+// implementation should be enough most of the time when writing
+// death tests; though it lacks many features you can find in PCRE
+// or POSIX extended regex syntax. For example, we don't support
+// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+// repetition count ("x{5,7}"), among others.
+//
+// Below is the syntax that we do support. We chose it to be a
+// subset of both PCRE and POSIX extended regex, so it's easy to
+// learn wherever you come from. In the following: 'A' denotes a
+// literal character, period (.), or a single \\ escape sequence;
+// 'x' and 'y' denote regular expressions; 'm' and 'n' are for
+// natural numbers.
+//
+// c matches any literal character c
+// \\d matches any decimal digit
+// \\D matches any character that's not a decimal digit
+// \\f matches \f
+// \\n matches \n
+// \\r matches \r
+// \\s matches any ASCII whitespace, including \n
+// \\S matches any character that's not a whitespace
+// \\t matches \t
+// \\v matches \v
+// \\w matches any letter, _, or decimal digit
+// \\W matches any character that \\w doesn't match
+//   \\c matches any literal character c, which must be a punctuation character
+// . matches any single character except \n
+// A? matches 0 or 1 occurrences of A
+// A* matches 0 or many occurrences of A
+// A+ matches 1 or many occurrences of A
+// ^ matches the beginning of a string (not that of each line)
+// $ matches the end of a string (not that of each line)
+// xy matches x followed by y
+//
+// If you accidentally use PCRE or POSIX extended regex features
+// not implemented by us, you will get a run-time failure. In that
+// case, please try to rewrite your regular expression within the
+// above syntax.
+//
+// This implementation is *not* meant to be as highly tuned or robust
+// as a compiled regex library, but should perform well enough for a
+// death test, which already incurs significant overhead by launching
+// a child process.
+//
+// Known caveats:
+//
+// A "threadsafe" style death test obtains the path to the test
+// program from argv[0] and re-executes it in the sub-process. For
+// simplicity, the current implementation doesn't search the PATH
+// when launching the sub-process. This means that the user must
+// invoke the test program via a path that contains at least one
+// path separator (e.g. path/to/foo_test and
+// /absolute/path/to/bar_test are fine, but foo_test is not). This
+// is rarely a problem as people usually don't put the test binary
+// directory in PATH.
+//
+// TODO(wan@google.com): make thread-safe death tests search the PATH.
+
+// Asserts that a given statement causes the program to exit, with an
+// integer exit status that satisfies predicate, and emitting error output
+// that matches regex.
+# define ASSERT_EXIT(statement, predicate, regex) \
+ GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_)
+
+// Like ASSERT_EXIT, but continues on to successive tests in the
+// test case, if any:
+# define EXPECT_EXIT(statement, predicate, regex) \
+ GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_)
+
+// Asserts that a given statement causes the program to exit, either by
+// explicitly exiting with a nonzero exit code or being killed by a
+// signal, and emitting error output that matches regex.
+# define ASSERT_DEATH(statement, regex) \
+ ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Like ASSERT_DEATH, but continues on to successive tests in the
+// test case, if any:
+# define EXPECT_DEATH(statement, regex) \
+ EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex)
+
+// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*:
+
+// Tests that an exit code describes a normal exit with a given exit code.
+class GTEST_API_ ExitedWithCode {
+ public:
+ explicit ExitedWithCode(int exit_code);
+ bool operator()(int exit_status) const;
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ExitedWithCode& other);
+
+ const int exit_code_;
+};
+
+# if !GTEST_OS_WINDOWS
+// Tests that an exit code describes an exit due to termination by a
+// given signal.
+class GTEST_API_ KilledBySignal {
+ public:
+ explicit KilledBySignal(int signum);
+ bool operator()(int exit_status) const;
+ private:
+ const int signum_;
+};
+# endif // !GTEST_OS_WINDOWS
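+
+// For example (a hedged sketch, POSIX only; raise() requires <csignal>):
+//
+//   EXPECT_EXIT(raise(SIGKILL), ::testing::KilledBySignal(SIGKILL), "");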
+
+// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode.
+// The death testing framework causes this to have interesting semantics,
+// since the side effects of the call are only visible in opt mode, and
+// not in debug mode.
+//
+// In practice, this can be used to test functions that utilize the
+// LOG(DFATAL) macro using the following style:
+//
+// int DieInDebugOr12(int* sideeffect) {
+// if (sideeffect) {
+// *sideeffect = 12;
+// }
+// LOG(DFATAL) << "death";
+// return 12;
+// }
+//
+// TEST(TestCase, TestDieOr12WorksInDbgAndOpt) {
+// int sideeffect = 0;
+// // Only asserts in dbg.
+// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death");
+//
+// #ifdef NDEBUG
+// // opt-mode has sideeffect visible.
+// EXPECT_EQ(12, sideeffect);
+// #else
+// // dbg-mode no visible sideeffect.
+// EXPECT_EQ(0, sideeffect);
+// #endif
+// }
+//
+// This will assert that DieInDebugOr12() crashes in debug
+// mode, usually due to a DCHECK or LOG(DFATAL), but returns the
+// appropriate fallback value (12 in this case) in opt mode. If you
+// need to test that a function has appropriate side-effects in opt
+// mode, include assertions against the side-effects. A general
+// pattern for this is:
+//
+// EXPECT_DEBUG_DEATH({
+// // Side-effects here will have an effect after this statement in
+// // opt mode, but none in debug mode.
+// EXPECT_EQ(12, DieInDebugOr12(&sideeffect));
+// }, "death");
+//
+# ifdef NDEBUG
+
+# define EXPECT_DEBUG_DEATH(statement, regex) \
+ GTEST_EXECUTE_STATEMENT_(statement, regex)
+
+# define ASSERT_DEBUG_DEATH(statement, regex) \
+ GTEST_EXECUTE_STATEMENT_(statement, regex)
+
+# else
+
+# define EXPECT_DEBUG_DEATH(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+
+# define ASSERT_DEBUG_DEATH(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+
+# endif // NDEBUG for EXPECT_DEBUG_DEATH
+#endif // GTEST_HAS_DEATH_TEST
+
+// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and
+// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if
+// death tests are supported; otherwise they just issue a warning. This is
+// useful when you are combining death test assertions with normal test
+// assertions in one test.
+#if GTEST_HAS_DEATH_TEST
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ EXPECT_DEATH(statement, regex)
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ ASSERT_DEATH(statement, regex)
+#else
+# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )
+# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \
+ GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)
+#endif
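+
+// A usage sketch (ParseOrDie() is a hypothetical function; this test
+// compiles whether or not death tests are supported on the platform):
+//
+//   TEST(ParserTest, DiesOnNullInput) {
+//     EXPECT_DEATH_IF_SUPPORTED(ParseOrDie(NULL), "NULL input");
+//   }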
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-message.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-message.h
new file mode 100644
index 000000000..fe879bca7
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-message.h
@@ -0,0 +1,250 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the Message class.
+//
+// IMPORTANT NOTE: Due to a limitation of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+
+#include <limits>
+
+#include "gtest/internal/gtest-port.h"
+
+// Ensures that there is at least one operator<< in the global namespace.
+// See Message& operator<<(...) below for why.
+void operator<<(const testing::internal::Secret&, int);
+
+namespace testing {
+
+// The Message class works like an ostream repeater.
+//
+// Typical usage:
+//
+// 1. You stream a bunch of values to a Message object.
+// It will remember the text in a stringstream.
+// 2. Then you stream the Message object to an ostream.
+// This causes the text in the Message to be streamed
+// to the ostream.
+//
+// For example,
+//
+// testing::Message foo;
+// foo << 1 << " != " << 2;
+// std::cout << foo;
+//
+// will print "1 != 2".
+//
+// Message is not intended to be inherited from. In particular, its
+// destructor is not virtual.
+//
+// Note that stringstream behaves differently in gcc and in MSVC. You
+// can stream a NULL char pointer to it in the former, but not in the
+// latter (it causes an access violation if you do). The Message
+// class hides this difference by treating a NULL char pointer as
+// "(null)".
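+//
+// For example (a hedged sketch):
+//
+//   Message() << static_cast<char*>(NULL);  // yields the text "(null)"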
+class GTEST_API_ Message {
+ private:
+ // The type of basic IO manipulators (endl, ends, and flush) for
+ // narrow streams.
+ typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&);
+
+ public:
+ // Constructs an empty Message.
+ Message();
+
+ // Copy constructor.
+ Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT
+ *ss_ << msg.GetString();
+ }
+
+ // Constructs a Message from a C-string.
+ explicit Message(const char* str) : ss_(new ::std::stringstream) {
+ *ss_ << str;
+ }
+
+#if GTEST_OS_SYMBIAN
+ // Streams a value (either a pointer or not) to this object.
+ template <typename T>
+ inline Message& operator <<(const T& value) {
+ StreamHelper(typename internal::is_pointer<T>::type(), value);
+ return *this;
+ }
+#else
+ // Streams a non-pointer value to this object.
+ template <typename T>
+ inline Message& operator <<(const T& val) {
+ // Some libraries overload << for STL containers. These
+ // overloads are defined in the global namespace instead of ::std.
+ //
+ // C++'s symbol lookup rule (i.e. Koenig lookup) says that these
+ // overloads are visible in either the std namespace or the global
+ // namespace, but not other namespaces, including the testing
+ // namespace which Google Test's Message class is in.
+ //
+  // To allow STL containers (and other types that have a << operator
+ // defined in the global namespace) to be used in Google Test
+ // assertions, testing::Message must access the custom << operator
+ // from the global namespace. With this using declaration,
+ // overloads of << defined in the global namespace and those
+ // visible via Koenig lookup are both exposed in this function.
+ using ::operator <<;
+ *ss_ << val;
+ return *this;
+ }
+
+ // Streams a pointer value to this object.
+ //
+ // This function is an overload of the previous one. When you
+ // stream a pointer to a Message, this definition will be used as it
+ // is more specialized. (The C++ Standard, section
+ // [temp.func.order].) If you stream a non-pointer, then the
+ // previous definition will be used.
+ //
+  // The reason for this overload is that streaming a NULL pointer to
+  // an ostream is undefined behavior. Depending on the compiler, you
+  // may get "0", "(nil)", "(null)", or an access violation. To
+  // ensure consistent results across compilers, we always treat NULL
+ // as "(null)".
+ template <typename T>
+ inline Message& operator <<(T* const& pointer) { // NOLINT
+ if (pointer == NULL) {
+ *ss_ << "(null)";
+ } else {
+ *ss_ << pointer;
+ }
+ return *this;
+ }
+#endif // GTEST_OS_SYMBIAN
+
+ // Since the basic IO manipulators are overloaded for both narrow
+ // and wide streams, we have to provide this specialized definition
+ // of operator <<, even though its body is the same as the
+ // templatized version above. Without this definition, streaming
+ // endl or other basic IO manipulators to Message will confuse the
+ // compiler.
+ Message& operator <<(BasicNarrowIoManip val) {
+ *ss_ << val;
+ return *this;
+ }
+
+ // Instead of 1/0, we want to see true/false for bool values.
+ Message& operator <<(bool b) {
+ return *this << (b ? "true" : "false");
+ }
+
+ // These two overloads allow streaming a wide C string to a Message
+ // using the UTF-8 encoding.
+ Message& operator <<(const wchar_t* wide_c_str);
+ Message& operator <<(wchar_t* wide_c_str);
+
+#if GTEST_HAS_STD_WSTRING
+ // Converts the given wide string to a narrow string using the UTF-8
+ // encoding, and streams the result to this Message object.
+ Message& operator <<(const ::std::wstring& wstr);
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+ // Converts the given wide string to a narrow string using the UTF-8
+ // encoding, and streams the result to this Message object.
+ Message& operator <<(const ::wstring& wstr);
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+ // Gets the text streamed to this object so far as an std::string.
+ // Each '\0' character in the buffer is replaced with "\\0".
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ std::string GetString() const;
+
+ private:
+
+#if GTEST_OS_SYMBIAN
+ // These are needed as the Nokia Symbian Compiler cannot decide between
+ // const T& and const T* in a function template. The Nokia compiler _can_
+ // decide between class template specializations for T and T*, so a
+ // tr1::type_traits-like is_pointer works, and we can overload on that.
+ template <typename T>
+ inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) {
+ if (pointer == NULL) {
+ *ss_ << "(null)";
+ } else {
+ *ss_ << pointer;
+ }
+ }
+ template <typename T>
+ inline void StreamHelper(internal::false_type /*is_pointer*/,
+ const T& value) {
+ // See the comments in Message& operator <<(const T&) above for why
+ // we need this using statement.
+ using ::operator <<;
+ *ss_ << value;
+ }
+#endif // GTEST_OS_SYMBIAN
+
+ // We'll hold the text streamed to this object here.
+ const internal::scoped_ptr< ::std::stringstream> ss_;
+
+ // We declare (but don't implement) this to prevent the compiler
+ // from implementing the assignment operator.
+ void operator=(const Message&);
+};
+
+// Streams a Message to an ostream.
+inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
+ return os << sb.GetString();
+}
+
+namespace internal {
+
+// Converts a streamable value to an std::string. A NULL pointer is
+// converted to "(null)". When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+template <typename T>
+std::string StreamableToString(const T& streamable) {
+ return (Message() << streamable).GetString();
+}
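+
+// For example (sketch): StreamableToString(3.5) returns "3.5", and
+// StreamableToString(static_cast<int*>(NULL)) returns "(null)".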
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-param-test.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-param-test.h
new file mode 100644
index 000000000..038f9ba79
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-param-test.h
@@ -0,0 +1,1444 @@
+// This file was GENERATED by command:
+// pump.py gtest-param-test.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing Framework (Google Test)
+//
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It is usually derived from testing::TestWithParam<T> (see below for
+// another inheritance scheme that's sometimes useful in more complicated
+// class hierarchies), where T is the type of your parameter values.
+// TestWithParam<T> is itself derived from testing::Test. T can be any
+// copyable type. If it's a raw pointer, you are responsible for managing the
+// lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+ // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think of.
+
+TEST_P(FooTest, DoesBlah) {
+ // Inside a test, access the test parameter with the GetParam() method
+ // of the TestWithParam<T> class:
+ EXPECT_TRUE(foo.Blah(GetParam()));
+ ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+ ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, which
+// are all in the testing namespace:
+//
+//
+// Range(begin, end [, step]) - Yields values {begin, begin+step,
+// begin+step+step, ...}. The values do not
+// include end. step defaults to 1.
+// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}.
+// ValuesIn(container) - Yields values from a C-style array, an STL
+// ValuesIn(begin,end) container, or an iterator range [begin, end).
+// Bool() - Yields sequence {false, true}.
+// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product
+// for the math savvy) of the values generated
+// by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+ FooTest,
+ Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern (yes, you
+// can instantiate it more than once), the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on one hand, to adjust generator parameters in
+// order to dynamically determine a set of tests to run, and on the other
+// hand, to inspect the generated tests with the Google Test reflection
+// API before RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+//
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface. For example:
+
+class BaseTest : public ::testing::Test {
+ // You can inherit all the usual members for a non-parameterized test
+ // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+ // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+ // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+ // GetParam works just the same here as if you inherit from TestWithParam.
+ EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif // 0
+
+#include "gtest/internal/gtest-port.h"
+
+#if !GTEST_OS_SYMBIAN
+# include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-param-util.h"
+#include "gtest/internal/gtest-param-util-generated.h"
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test case is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, tests from test case FooTest are each
+// instantiated three times, with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+//   - returns a generator producing a sequence of values {start, start+1,
+//     start+2, ...}.
+// Range(start, end, step)
+//   - returns a generator producing a sequence of values {start, start+step,
+//     start+step+step, ...}.
+// Notes:
+// * The generated sequences never include end. For example, Range(1, 5)
+// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+// returns a generator producing {1, 3, 5, 7}.
+//   * start and end must have the same type. That type may be any integral or
+//     floating-point type or a user-defined type satisfying these conditions:
+// * It must be assignable (have operator=() defined).
+// * It must have operator+() (operator+(int-compatible type) for
+// two-operand version).
+// * It must have operator<() defined.
+// Elements in the resulting sequences will also have that type.
+//   * The condition start < end must be satisfied in order for the resulting
+//     sequences to contain any elements.
+//
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+ return internal::ParamGenerator<T>(
+ new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+ return Range(start, end, 1);
+}
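+
+// For example (a sketch, assuming a fixture FooTest derived from
+// TestWithParam<int>):
+//
+//   INSTANTIATE_TEST_CASE_P(Evens, FooTest, Range(0, 10, 2));
+//
+// instantiates FooTest with parameter values 0, 2, 4, 6, and 8.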
+
+// ValuesIn() function allows generation of tests with parameters coming from
+// a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+// - returns a generator producing sequences with elements from
+// a C-style array.
+// ValuesIn(const Container& container)
+// - returns a generator producing sequences with elements from
+// an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+// - returns a generator producing sequences with elements from
+// a range [begin, end) defined by a pair of STL-style iterators. These
+// iterators can also be plain C pointers.
+//
+// Please note that ValuesIn copies the values from the containers
+// passed in and keeps them to generate tests in RUN_ALL_TESTS().
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with STL strings with values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+// ::std::vector< ::std::string> v;
+// v.push_back("a");
+// v.push_back("b");
+// return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+// StlStringTest,
+// ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+// ::std::list<char> list;
+// list.push_back('a');
+// list.push_back('b');
+// return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+// CharTest,
+// ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+ typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
+ ::value_type ParamType;
+ return internal::ParamGenerator<ParamType>(
+ new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+ return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container) {
+ return ValuesIn(container.begin(), container.end());
+}
+
+// Values() allows generating tests from explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+// - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with values 1, 2, and 3.5.
+// The exact type of values will depend on the type of parameter in BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to 50 parameters.
+//
+template <typename T1>
+internal::ValueArray1<T1> Values(T1 v1) {
+ return internal::ValueArray1<T1>(v1);
+}
+
+template <typename T1, typename T2>
+internal::ValueArray2<T1, T2> Values(T1 v1, T2 v2) {
+ return internal::ValueArray2<T1, T2>(v1, v2);
+}
+
+template <typename T1, typename T2, typename T3>
+internal::ValueArray3<T1, T2, T3> Values(T1 v1, T2 v2, T3 v3) {
+ return internal::ValueArray3<T1, T2, T3>(v1, v2, v3);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+internal::ValueArray4<T1, T2, T3, T4> Values(T1 v1, T2 v2, T3 v3, T4 v4) {
+ return internal::ValueArray4<T1, T2, T3, T4>(v1, v2, v3, v4);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+internal::ValueArray5<T1, T2, T3, T4, T5> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5) {
+ return internal::ValueArray5<T1, T2, T3, T4, T5>(v1, v2, v3, v4, v5);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+internal::ValueArray6<T1, T2, T3, T4, T5, T6> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6) {
+ return internal::ValueArray6<T1, T2, T3, T4, T5, T6>(v1, v2, v3, v4, v5, v6);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7) {
+ return internal::ValueArray7<T1, T2, T3, T4, T5, T6, T7>(v1, v2, v3, v4, v5,
+ v6, v7);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) {
+ return internal::ValueArray8<T1, T2, T3, T4, T5, T6, T7, T8>(v1, v2, v3, v4,
+ v5, v6, v7, v8);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) {
+ return internal::ValueArray9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) {
+ return internal::ValueArray10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+ T11> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11) {
+ return internal::ValueArray11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10,
+ T11>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12) {
+ return internal::ValueArray12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13) {
+ return internal::ValueArray13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) {
+ return internal::ValueArray14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) {
+ return internal::ValueArray15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16) {
+ return internal::ValueArray16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17) {
+ return internal::ValueArray17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18) {
+ return internal::ValueArray18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) {
+ return internal::ValueArray19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) {
+ return internal::ValueArray20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) {
+ return internal::ValueArray21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22) {
+ return internal::ValueArray22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23) {
+ return internal::ValueArray23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24) {
+ return internal::ValueArray24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24>(v1, v2,
+ v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+ v19, v20, v21, v22, v23, v24);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+ T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+ T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) {
+ return internal::ValueArray25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+ v18, v19, v20, v21, v22, v23, v24, v25);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26) {
+ return internal::ValueArray26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27) {
+ return internal::ValueArray27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+ v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28) {
+ return internal::ValueArray28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+ v28);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29) {
+ return internal::ValueArray29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+ v27, v28, v29);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) {
+ return internal::ValueArray30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+ v26, v27, v28, v29, v30);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) {
+ return internal::ValueArray31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+ v25, v26, v27, v28, v29, v30, v31);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32) {
+ return internal::ValueArray32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33) {
+ return internal::ValueArray33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+ T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+ T31 v31, T32 v32, T33 v33, T34 v34) {
+ return internal::ValueArray34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+ v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) {
+ return internal::ValueArray35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+ v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) {
+ return internal::ValueArray36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37> Values(T1 v1, T2 v2, T3 v3,
+ T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37) {
+ return internal::ValueArray37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37>(v1, v2, v3,
+ v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36, v37);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37, T38 v38) {
+ return internal::ValueArray38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38>(v1, v2,
+ v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18,
+ v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32,
+ v33, v34, v35, v36, v37, v38);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Values(T1 v1, T2 v2,
+ T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12,
+ T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20,
+ T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28,
+ T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36,
+ T37 v37, T38 v38, T39 v39) {
+ return internal::ValueArray39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39>(v1,
+ v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17,
+ v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31,
+ v32, v33, v34, v35, v36, v37, v38, v39);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Values(T1 v1,
+ T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11,
+ T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19,
+ T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27,
+ T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35,
+ T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) {
+ return internal::ValueArray40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15,
+ v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29,
+ v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) {
+ return internal::ValueArray41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14,
+ v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28,
+ v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42) {
+ return internal::ValueArray42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13,
+ v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27,
+ v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41,
+ v42);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43) {
+ return internal::ValueArray43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12,
+ v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26,
+ v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40,
+ v41, v42, v43);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44) {
+ return internal::ValueArray44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11,
+ v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25,
+ v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39,
+ v40, v41, v42, v43, v44);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+ T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+ T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) {
+ return internal::ValueArray45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45>(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10,
+ v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24,
+ v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38,
+ v39, v40, v41, v42, v43, v44, v45);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) {
+ return internal::ValueArray46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46>(v1, v2, v3, v4, v5, v6, v7, v8, v9,
+ v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+ v38, v39, v40, v41, v42, v43, v44, v45, v46);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) {
+ return internal::ValueArray47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47>(v1, v2, v3, v4, v5, v6, v7, v8,
+ v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23,
+ v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37,
+ v38, v39, v40, v41, v42, v43, v44, v45, v46, v47);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6,
+ T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15,
+ T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23,
+ T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31,
+ T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39,
+ T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47,
+ T48 v48) {
+ return internal::ValueArray48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48>(v1, v2, v3, v4, v5, v6, v7,
+ v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22,
+ v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36,
+ v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49> Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5,
+ T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14,
+ T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22,
+ T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30,
+ T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38,
+ T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46,
+ T47 v47, T48 v48, T49 v49) {
+ return internal::ValueArray49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48, T49>(v1, v2, v3, v4, v5, v6,
+ v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21,
+ v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35,
+ v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> Values(T1 v1, T2 v2, T3 v3, T4 v4,
+ T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13,
+ T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21,
+ T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29,
+ T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37,
+ T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45,
+ T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) {
+ return internal::ValueArray50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12, T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26, T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40, T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>(v1, v2, v3, v4,
+ v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19,
+ v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33,
+ v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47,
+ v48, v49, v50);
+}
+
+// Bool() allows generating tests with parameters in a set of (false, true).
+//
+// Synopsis:
+// Bool()
+// - returns a generator producing sequences with elements {false, true}.
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// the Combine() function.
+//
+// In the following example all tests in the test case FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+// virtual void SetUp() {
+// external_flag = GetParam();
+// }
+// };
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+ return Values(false, true);
+}
+
+# if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+// - returns a generator producing sequences with elements coming from
+// the Cartesian product of elements from the sequences generated by
+// gen1, gen2, ..., genN. The sequence elements will have a type of
+// tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+//       of elements from sequences produced by gen1, gen2, ..., genN.
+//
+// Combine can have up to 10 arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate tests in test case AnimalTest each one with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+// : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+// Combine(Values("cat", "dog"),
+// Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+// : public testing::TestWithParam<tuple<bool, bool> > {
+// virtual void SetUp() {
+// // Assigns external_flag_1 and external_flag_2 values from the tuple.
+// tie(external_flag_1, external_flag_2) = GetParam();
+// }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+// // Test your code using external_flag_1 and external_flag_2 here.
+// }
+// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
+// Combine(Bool(), Bool()));
+//
+template <typename Generator1, typename Generator2>
+internal::CartesianProductHolder2<Generator1, Generator2> Combine(
+ const Generator1& g1, const Generator2& g2) {
+ return internal::CartesianProductHolder2<Generator1, Generator2>(
+ g1, g2);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3>
+internal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3) {
+ return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(
+ g1, g2, g3);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4>
+internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+ Generator4> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4) {
+ return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+ Generator4>(
+ g1, g2, g3, g4);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5>
+internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+ Generator4, Generator5> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5) {
+ return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+ Generator4, Generator5>(
+ g1, g2, g3, g4, g5);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6>
+internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6) {
+ return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6>(
+ g1, g2, g3, g4, g5, g6);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7>
+internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7) {
+ return internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7>(
+ g1, g2, g3, g4, g5, g6, g7);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8>
+internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8) {
+ return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8>(
+ g1, g2, g3, g4, g5, g6, g7, g8);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8, typename Generator9>
+internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8,
+ Generator9> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8, const Generator9& g9) {
+ return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(
+ g1, g2, g3, g4, g5, g6, g7, g8, g9);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+ typename Generator4, typename Generator5, typename Generator6,
+ typename Generator7, typename Generator8, typename Generator9,
+ typename Generator10>
+internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+ Generator10> Combine(
+ const Generator1& g1, const Generator2& g2, const Generator3& g3,
+ const Generator4& g4, const Generator5& g5, const Generator6& g6,
+ const Generator7& g7, const Generator8& g8, const Generator9& g9,
+ const Generator10& g10) {
+ return internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+ Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+ Generator10>(
+ g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);
+}
+# endif // GTEST_HAS_COMBINE
+
+
+
+# define TEST_P(test_case_name, test_name) \
+ class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ : public test_case_name { \
+ public: \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
+ virtual void TestBody(); \
+ private: \
+ static int AddToRegistry() { \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, \
+ ::testing::internal::CodeLocation(\
+ __FILE__, __LINE__))->AddTestPattern(\
+ #test_case_name, \
+ #test_name, \
+ new ::testing::internal::TestMetaFactory< \
+ GTEST_TEST_CLASS_NAME_(\
+ test_case_name, test_name)>()); \
+ return 0; \
+ } \
+ static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+ }; \
+ int GTEST_TEST_CLASS_NAME_(test_case_name, \
+ test_name)::gtest_registering_dummy_ = \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+ void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+// The optional last argument to INSTANTIATE_TEST_CASE_P allows the user
+// to specify a function or functor that generates custom test name suffixes
+// based on the test parameters. The function should accept one argument of
+// type testing::TestParamInfo<class ParamType>, and return std::string.
+//
+// testing::PrintToStringParamName is a builtin test suffix generator that
+// returns the value of testing::PrintToString(GetParam()). It does not work
+// for std::string or C strings.
+//
+// Note: test names must be non-empty, unique, and may only contain ASCII
+// alphanumeric characters or underscore.
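+//
+// For example, a minimal sketch of such a generator (CustomParamName and
+// MyIntTest are hypothetical names, and the parameter type is assumed to be
+// int):
+//
+//   std::string CustomParamName(const testing::TestParamInfo<int>& info) {
+//     std::ostringstream ss;
+//     ss << "Value" << info.param << "_Index" << info.index;
+//     return ss.str();  // e.g. "Value3_Index0"
+//   }
+//   INSTANTIATE_TEST_CASE_P(Custom, MyIntTest, testing::Values(3, 5, 8),
+//                           CustomParamName);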
+
+# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator, ...) \
+ ::testing::internal::ParamGenerator<test_case_name::ParamType> \
+ gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
+ ::std::string gtest_##prefix##test_case_name##_EvalGenerateName_( \
+ const ::testing::TestParamInfo<test_case_name::ParamType>& info) { \
+ return ::testing::internal::GetParamNameGen<test_case_name::ParamType> \
+ (__VA_ARGS__)(info); \
+ } \
+ int gtest_##prefix##test_case_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, \
+ ::testing::internal::CodeLocation(\
+ __FILE__, __LINE__))->AddTestCaseInstantiation(\
+ #prefix, \
+ &gtest_##prefix##test_case_name##_EvalGenerator_, \
+ &gtest_##prefix##test_case_name##_EvalGenerateName_, \
+ __FILE__, __LINE__)
+
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-param-test.h.pump b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-param-test.h.pump
new file mode 100644
index 000000000..3078d6d2a
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-param-test.h.pump
@@ -0,0 +1,510 @@
+$$ -*- mode: c++; -*-
+$var n = 50 $$ Maximum length of Values arguments we want to support.
+$var maxtuple = 10 $$ Maximum number of Combine arguments we want to support.
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing Framework (Google Test)
+//
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It is usually derived from testing::TestWithParam<T> (see below for
+// another inheritance scheme that's sometimes useful in more complicated
+// class hierarchies), where T is the type of your parameter values.
+// TestWithParam<T> is itself derived from testing::Test. T can be any
+// copyable type. If it's a raw pointer, you are responsible for managing the
+// lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+ // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think of.
+
+TEST_P(FooTest, DoesBlah) {
+ // Inside a test, access the test parameter with the GetParam() method
+ // of the TestWithParam<T> class:
+ EXPECT_TRUE(foo.Blah(GetParam()));
+ ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+ ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, which
+// are all in the testing namespace:
+//
+//
+// Range(begin, end [, step]) - Yields values {begin, begin+step,
+// begin+step+step, ...}. The values do not
+// include end. step defaults to 1.
+// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}.
+// ValuesIn(container) - Yields values from a C-style array, an STL
+// ValuesIn(begin,end) container, or an iterator range [begin, end).
+// Bool() - Yields sequence {false, true}.
+// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product
+// for the math savvy) of the values generated
+// by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+ FooTest,
+ Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern (yes, you
+// can instantiate it more than once), the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on one hand, to adjust generator parameters in order
+// to dynamically determine a set of tests to run and, on the other hand, to
+// inspect the generated tests with the Google Test reflection API before
+// RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+//
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface. For example:
+
+class BaseTest : public ::testing::Test {
+ // You can inherit all the usual members for a non-parameterized test
+ // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+ // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+ // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+ // GetParam works just the same here as if you inherit from TestWithParam.
+ EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif // 0
+
+#include "gtest/internal/gtest-port.h"
+
+#if !GTEST_OS_SYMBIAN
+# include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-param-util.h"
+#include "gtest/internal/gtest-param-util-generated.h"
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test case is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, tests from test case FooTest are instantiated
+// three times each, with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+// - returns a generator producing a sequence of values {start, start+1,
+//       start+2, ...}.
+// Range(start, end, step)
+// - returns a generator producing a sequence of values {start, start+step,
+//       start+step+step, ...}.
+// Notes:
+// * The generated sequences never include end. For example, Range(1, 5)
+// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+// returns a generator producing {1, 3, 5, 7}.
+// * start and end must have the same type. That type may be any integral or
+//     floating-point type or a user-defined type satisfying these conditions:
+// * It must be assignable (have operator=() defined).
+// * It must have operator+() (operator+(int-compatible type) for
+// two-operand version).
+// * It must have operator<() defined.
+// Elements in the resulting sequences will also have that type.
+//   * The condition start < end must be satisfied in order for the resulting
+//     sequences to contain any elements.
+//
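+// For example, a minimal usage sketch (RangeTest is a hypothetical fixture
+// derived from testing::TestWithParam<int>):
+//
+//   INSTANTIATE_TEST_CASE_P(OddNumbers, RangeTest, Range(1, 9, 2));
+//
+// instantiates every TEST_P in RangeTest with the values 1, 3, 5, and 7.
+//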
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+ return internal::ParamGenerator<T>(
+ new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+ return Range(start, end, 1);
+}
+
+// ValuesIn() function allows generation of tests with parameters coming from
+// a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+// - returns a generator producing sequences with elements from
+// a C-style array.
+// ValuesIn(const Container& container)
+// - returns a generator producing sequences with elements from
+// an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+// - returns a generator producing sequences with elements from
+// a range [begin, end) defined by a pair of STL-style iterators. These
+// iterators can also be plain C pointers.
+//
+// Please note that ValuesIn copies the values from the containers
+// passed in and keeps them to generate tests in RUN_ALL_TESTS().
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with STL strings with values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+// ::std::vector< ::std::string> v;
+// v.push_back("a");
+// v.push_back("b");
+// return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+// StlStringTest,
+// ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+// ::std::list<char> list;
+// list.push_back('a');
+// list.push_back('b');
+// return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+// CharTest,
+// ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+ typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
+ ::value_type ParamType;
+ return internal::ParamGenerator<ParamType>(
+ new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+ return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container) {
+ return ValuesIn(container.begin(), container.end());
+}
+
+// Values() allows generating tests from explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+// - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with values 1, 2, and
+// 3.5.
+// The exact type of values will depend on the type of parameter in BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to $n parameters.
+//
+$range i 1..n
+$for i [[
+$range j 1..i
+
+template <$for j, [[typename T$j]]>
+internal::ValueArray$i<$for j, [[T$j]]> Values($for j, [[T$j v$j]]) {
+ return internal::ValueArray$i<$for j, [[T$j]]>($for j, [[v$j]]);
+}
+
+]]
+
+// Bool() allows generating tests with parameters in a set of (false, true).
+//
+// Synopsis:
+// Bool()
+// - returns a generator producing sequences with elements {false, true}.
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// the Combine() function.
+//
+// In the following example all tests in the test case FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+// virtual void SetUp() {
+// external_flag = GetParam();
+// }
+// };
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+ return Values(false, true);
+}
+
+# if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+// - returns a generator producing sequences with elements coming from
+// the Cartesian product of elements from the sequences generated by
+// gen1, gen2, ..., genN. The sequence elements will have a type of
+// tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+//       of elements from sequences produced by gen1, gen2, ..., genN.
+//
+// Combine can have up to $maxtuple arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate tests in test case AnimalTest each one with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+// : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+// Combine(Values("cat", "dog"),
+// Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+// : public testing::TestWithParam<tuple<bool, bool> > {
+// virtual void SetUp() {
+// // Assigns external_flag_1 and external_flag_2 values from the tuple.
+// tie(external_flag_1, external_flag_2) = GetParam();
+// }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+// // Test your code using external_flag_1 and external_flag_2 here.
+// }
+// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
+// Combine(Bool(), Bool()));
+//
+$range i 2..maxtuple
+$for i [[
+$range j 1..i
+
+template <$for j, [[typename Generator$j]]>
+internal::CartesianProductHolder$i<$for j, [[Generator$j]]> Combine(
+ $for j, [[const Generator$j& g$j]]) {
+ return internal::CartesianProductHolder$i<$for j, [[Generator$j]]>(
+ $for j, [[g$j]]);
+}
+
+]]
+# endif // GTEST_HAS_COMBINE
+
+
+
+# define TEST_P(test_case_name, test_name) \
+ class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ : public test_case_name { \
+ public: \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
+ virtual void TestBody(); \
+ private: \
+ static int AddToRegistry() { \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, \
+ ::testing::internal::CodeLocation(\
+ __FILE__, __LINE__))->AddTestPattern(\
+ #test_case_name, \
+ #test_name, \
+ new ::testing::internal::TestMetaFactory< \
+ GTEST_TEST_CLASS_NAME_(\
+ test_case_name, test_name)>()); \
+ return 0; \
+ } \
+ static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+ }; \
+ int GTEST_TEST_CLASS_NAME_(test_case_name, \
+ test_name)::gtest_registering_dummy_ = \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+ void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+// The optional last argument to INSTANTIATE_TEST_CASE_P allows the user
+// to specify a function or functor that generates custom test name suffixes
+// based on the test parameters. The function should accept one argument of
+// type testing::TestParamInfo<class ParamType>, and return std::string.
+//
+// testing::PrintToStringParamName is a builtin test suffix generator that
+// returns the value of testing::PrintToString(GetParam()).
+//
+// Note: test names must be non-empty, unique, and may only contain ASCII
+// alphanumeric characters or underscore. Because PrintToString adds quotes
+// to std::string and C strings, it won't work for these types.
+
+# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator, ...) \
+ ::testing::internal::ParamGenerator<test_case_name::ParamType> \
+ gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
+ ::std::string gtest_##prefix##test_case_name##_EvalGenerateName_( \
+ const ::testing::TestParamInfo<test_case_name::ParamType>& info) { \
+ return ::testing::internal::GetParamNameGen<test_case_name::ParamType> \
+ (__VA_ARGS__)(info); \
+ } \
+ int gtest_##prefix##test_case_name##_dummy_ GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, \
+ ::testing::internal::CodeLocation(\
+ __FILE__, __LINE__))->AddTestCaseInstantiation(\
+ #prefix, \
+ &gtest_##prefix##test_case_name##_EvalGenerator_, \
+ &gtest_##prefix##test_case_name##_EvalGenerateName_, \
+ __FILE__, __LINE__)
+
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-printers.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-printers.h
new file mode 100644
index 000000000..8a33164cb
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-printers.h
@@ -0,0 +1,993 @@
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+// void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// A user can teach this function how to print a class type T by
+// defining either operator<<() or PrintTo() in the namespace that
+// defines T. More specifically, the FIRST defined function in the
+// following list will be used (assuming T is defined in namespace
+// foo):
+//
+// 1. foo::PrintTo(const T&, ostream*)
+// 2. operator<<(ostream&, const T&) defined in either foo or the
+// global namespace.
+//
+// If none of the above is defined, it will print the debug string of
+// the value if it is a protocol buffer, or print the raw bytes in the
+// value otherwise.
+//
+// To aid debugging: when T is a reference type, the address of the
+// value is also printed; when T is a (const) char pointer, both the
+// pointer value and the NUL-terminated string it points to are
+// printed.
+//
+// We also provide some convenient wrappers:
+//
+// // Prints a value to a string. For a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// std::string ::testing::PrintToString(const T& value);
+//
+// // Prints a value tersely: for a reference type, the referenced
+// // value (but not the address) is printed; for a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// void ::testing::internal::UniversalTersePrint(const T& value, ostream*);
+//
+// // Prints value using the type inferred by the compiler. The difference
+// // from UniversalTersePrint() is that this function prints both the
+// // pointer and the NUL-terminated string for a (const or not) char pointer.
+// void ::testing::internal::UniversalPrint(const T& value, ostream*);
+//
+// // Prints the fields of a tuple tersely to a string vector, one
+// // element for each field. Tuple support must be enabled in
+// // gtest-port.h.
+// std::vector<string> UniversalTersePrintTupleFieldsToStrings(
+// const Tuple& value);
+//
+// Known limitation:
+//
+// The print primitives print the elements of an STL-style container
+// using the compiler-inferred type of *iter where iter is a
+// const_iterator of the container. When const_iterator is an input
+// iterator but not a forward iterator, this inferred type may not
+// match value_type, and the print output may be incorrect. In
+// practice, this is rarely a problem as for most containers
+// const_iterator is a forward iterator. We'll fix this if there's an
+// actual need for it. Note that this fix cannot rely on value_type
+// being defined as many user-defined container types don't have
+// value_type.
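+//
+// As a brief illustration of customization point (1) above, a minimal
+// sketch (the type Foo, its field, and the printed format are hypothetical):
+//
+//   namespace foo {
+//   struct Foo { int bar; };
+//   void PrintTo(const Foo& f, ::std::ostream* os) {
+//     *os << "Foo(bar=" << f.bar << ")";
+//   }
+//   }  // namespace foo
+//
+// With this in place, ::testing::PrintToString(my_foo) and failure messages
+// involving Foo values use this format instead of printing raw bytes.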
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#include <ostream> // NOLINT
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+#include "gtest/internal/gtest-port.h"
+#include "gtest/internal/gtest-internal.h"
+
+#if GTEST_HAS_STD_TUPLE_
+# include <tuple>
+#endif
+
+namespace testing {
+
+// Definitions in the 'internal' and 'internal2' name spaces are
+// subject to change without notice. DO NOT USE THEM IN USER CODE!
+namespace internal2 {
+
+// Prints the given number of bytes in the given object to the given
+// ostream.
+GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
+ size_t count,
+ ::std::ostream* os);
+
+// For selecting which printer to use when a given type has neither <<
+// nor PrintTo().
+enum TypeKind {
+ kProtobuf, // a protobuf type
+ kConvertibleToInteger, // a type implicitly convertible to BiggestInt
+ // (e.g. a named or unnamed enum type)
+ kOtherType // anything else
+};
+
+// TypeWithoutFormatter<T, kTypeKind>::PrintValue(value, os) is called
+// by the universal printer to print a value of type T when neither
+// operator<< nor PrintTo() is defined for T, where kTypeKind is the
+// "kind" of T as defined by enum TypeKind.
+template <typename T, TypeKind kTypeKind>
+class TypeWithoutFormatter {
+ public:
+ // This default version is called when kTypeKind is kOtherType.
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ PrintBytesInObjectTo(reinterpret_cast<const unsigned char*>(&value),
+ sizeof(value), os);
+ }
+};
+
+// We print a protobuf using its ShortDebugString() when the string
+// doesn't exceed this many characters; otherwise we print it using
+// DebugString() for better readability.
+const size_t kProtobufOneLinerMaxLength = 50;
+
+template <typename T>
+class TypeWithoutFormatter<T, kProtobuf> {
+ public:
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ const ::testing::internal::string short_str = value.ShortDebugString();
+ const ::testing::internal::string pretty_str =
+ short_str.length() <= kProtobufOneLinerMaxLength ?
+ short_str : ("\n" + value.DebugString());
+ *os << ("<" + pretty_str + ">");
+ }
+};
+
+template <typename T>
+class TypeWithoutFormatter<T, kConvertibleToInteger> {
+ public:
+ // Since T has no << operator or PrintTo() but can be implicitly
+ // converted to BiggestInt, we print it as a BiggestInt.
+ //
+ // Most likely T is an enum type (either named or unnamed), in which
+ // case printing it as an integer is the desired behavior. In case
+ // T is not an enum, printing it as an integer is the best we can do
+ // given that it has no user-defined printer.
+ static void PrintValue(const T& value, ::std::ostream* os) {
+ const internal::BiggestInt kBigInt = value;
+ *os << kBigInt;
+ }
+};
+
+// Prints the given value to the given ostream. If the value is a
+// protocol message, its debug string is printed; if it's an enum or
+// of a type implicitly convertible to BiggestInt, it's printed as an
+// integer; otherwise the bytes in the value are printed. This is
+// what UniversalPrinter<T>::Print() does when it knows nothing about
+// type T and T has neither << operator nor PrintTo().
+//
+// A user can override this behavior for a class type Foo by defining
+// a << operator in the namespace where Foo is defined.
+//
+// We put this operator in namespace 'internal2' instead of 'internal'
+// to simplify the implementation, as much code in 'internal' needs to
+// use << in STL, which would conflict with our own << were it defined
+// in 'internal'.
+//
+// Note that this operator<< takes a generic std::basic_ostream<Char,
+// CharTraits> type instead of the more restricted std::ostream. If
+// we define it to take an std::ostream instead, we'll get an
+// "ambiguous overloads" compiler error when trying to print a type
+// Foo that supports streaming to std::basic_ostream<Char,
+// CharTraits>, as the compiler cannot tell whether
+// operator<<(std::ostream&, const T&) or
+// operator<<(std::basic_ostream<Char, CharTraits>&, const Foo&) is more
+// specific.
+template <typename Char, typename CharTraits, typename T>
+::std::basic_ostream<Char, CharTraits>& operator<<(
+ ::std::basic_ostream<Char, CharTraits>& os, const T& x) {
+ TypeWithoutFormatter<T,
+ (internal::IsAProtocolMessage<T>::value ? kProtobuf :
+ internal::ImplicitlyConvertible<const T&, internal::BiggestInt>::value ?
+ kConvertibleToInteger : kOtherType)>::PrintValue(x, &os);
+ return os;
+}
+
+} // namespace internal2
+} // namespace testing
+
+// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
+// magic needed for implementing UniversalPrinter won't work.
+namespace testing_internal {
+
+// Used to print a value that is not an STL-style container when the
+// user doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
+ // With the following statement, during unqualified name lookup,
+ // testing::internal2::operator<< appears as if it was declared in
+ // the nearest enclosing namespace that contains both
+ // ::testing_internal and ::testing::internal2, i.e. the global
+ // namespace. For more details, refer to the C++ Standard section
+ // 7.3.4-1 [namespace.udir]. This allows us to fall back onto
+ // testing::internal2::operator<< in case T doesn't come with a <<
+ // operator.
+ //
+  // We cannot write 'using ::testing::internal2::operator<<;', as
+  // gcc 3.3 fails to compile it due to a compiler bug.
+ using namespace ::testing::internal2; // NOLINT
+
+ // Assuming T is defined in namespace foo, in the next statement,
+ // the compiler will consider all of:
+ //
+ // 1. foo::operator<< (thanks to Koenig look-up),
+ // 2. ::operator<< (as the current namespace is enclosed in ::),
+ // 3. testing::internal2::operator<< (thanks to the using statement above).
+ //
+ // The operator<< whose type matches T best will be picked.
+ //
+ // We deliberately allow #2 to be a candidate, as sometimes it's
+ // impossible to define #1 (e.g. when foo is ::std, defining
+ // anything in it is undefined behavior unless you are a compiler
+ // vendor.).
+ *os << value;
+}
+
+} // namespace testing_internal
+
+namespace testing {
+namespace internal {
+
+// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
+// value of type ToPrint that is an operand of a comparison assertion
+// (e.g. ASSERT_EQ). OtherOperand is the type of the other operand in
+// the comparison, and is used to help determine the best way to
+// format the value. In particular, when the value is a C string
+// (char pointer) and the other operand is an STL string object, we
+// want to format the C string as a string, since we know it is
+// compared by value with the string object. If the value is a char
+// pointer but the other operand is not an STL string object, we don't
+// know whether the pointer is supposed to point to a NUL-terminated
+// string, and thus want to print it as a pointer to be safe.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// The default case.
+template <typename ToPrint, typename OtherOperand>
+class FormatForComparison {
+ public:
+ static ::std::string Format(const ToPrint& value) {
+ return ::testing::PrintToString(value);
+ }
+};
+
+// Array.
+template <typename ToPrint, size_t N, typename OtherOperand>
+class FormatForComparison<ToPrint[N], OtherOperand> {
+ public:
+ static ::std::string Format(const ToPrint* value) {
+ return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);
+ }
+};
+
+// By default, print C strings as pointers to be safe, as we don't know
+// whether they actually point to a NUL-terminated string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \
+ template <typename OtherOperand> \
+ class FormatForComparison<CharType*, OtherOperand> { \
+ public: \
+ static ::std::string Format(CharType* value) { \
+ return ::testing::PrintToString(static_cast<const void*>(value)); \
+ } \
+ }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_
+
+// If a C string is compared with an STL string object, we know it's meant
+// to point to a NUL-terminated string, and thus can print it as a string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
+ template <> \
+ class FormatForComparison<CharType*, OtherStringType> { \
+ public: \
+ static ::std::string Format(CharType* value) { \
+ return ::testing::PrintToString(value); \
+ } \
+ }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);
+
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string);
+#endif
+
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring);
+#endif
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
+#endif
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, etc.)
+// operand to be used in a failure message. The type (but not value)
+// of the other operand may affect the format. This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char* or void*, and print it as a C string when it is compared
+// against an std::string object, for example.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename T1, typename T2>
+std::string FormatForComparisonFailureMessage(
+ const T1& value, const T2& /* other_operand */) {
+ return FormatForComparison<T1, T2>::Format(value);
+}
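+
+// For example (the pointer value shown is illustrative only):
+//
+//   const char* s = "hi";
+//   // Compared against another char*: formatted as a pointer.
+//   FormatForComparisonFailureMessage(s, s);          // e.g. "0x12345678"
+//   // Compared against an std::string: formatted as a string.
+//   FormatForComparisonFailureMessage(s, std::string("hi"));  // "\"hi\""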
+
+// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given
+// value to the given ostream. The caller must ensure that
+// 'ostream_ptr' is not NULL, or the behavior is undefined.
+//
+// We define UniversalPrinter as a class template (as opposed to a
+// function template), as we need to partially specialize it for
+// reference types, which cannot be done with function templates.
+template <typename T>
+class UniversalPrinter;
+
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os);
+
+// Used to print an STL-style container when the user doesn't define
+// a PrintTo() for it.
+template <typename C>
+void DefaultPrintTo(IsContainer /* dummy */,
+ false_type /* is not a pointer */,
+ const C& container, ::std::ostream* os) {
+ const size_t kMaxCount = 32; // The maximum number of elements to print.
+ *os << '{';
+ size_t count = 0;
+ for (typename C::const_iterator it = container.begin();
+ it != container.end(); ++it, ++count) {
+ if (count > 0) {
+ *os << ',';
+ if (count == kMaxCount) { // Enough has been printed.
+ *os << " ...";
+ break;
+ }
+ }
+ *os << ' ';
+ // We cannot call PrintTo(*it, os) here as PrintTo() doesn't
+ // handle *it being a native array.
+ internal::UniversalPrint(*it, os);
+ }
+
+ if (count > 0) {
+ *os << ' ';
+ }
+ *os << '}';
+}
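+
+// For example (illustrative), a std::vector<int> holding 1, 2, and 3
+// is printed as "{ 1, 2, 3 }"; a container with more than kMaxCount
+// elements ends with ", ... }" after the first kMaxCount elements.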
+
+// Used to print a pointer that is neither a char pointer nor a member
+// pointer, when the user doesn't define PrintTo() for it. (A member
+// variable pointer or member function pointer doesn't really point to
+// a location in the address space. Their representation is
+// implementation-defined. Therefore they will be printed as raw
+// bytes.)
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+ true_type /* is a pointer */,
+ T* p, ::std::ostream* os) {
+ if (p == NULL) {
+ *os << "NULL";
+ } else {
+ // C++ doesn't allow casting from a function pointer to any object
+ // pointer.
+ //
+ // IsTrue() silences warnings: "Condition is always true",
+ // "unreachable code".
+ if (IsTrue(ImplicitlyConvertible<T*, const void*>::value)) {
+ // T is not a function type. We just call << to print p,
+ // relying on ADL to pick up user-defined << for their pointer
+ // types, if any.
+ *os << p;
+ } else {
+ // T is a function type, so '*os << p' doesn't do what we want
+ // (it just prints p as bool). We want to print p as a const
+ // void*. However, we cannot cast it to const void* directly,
+ // even using reinterpret_cast, as earlier versions of gcc
+ // (e.g. 3.4.5) cannot compile the cast when p is a function
+ // pointer. Casting to UInt64 first solves the problem.
+ *os << reinterpret_cast<const void*>(
+ reinterpret_cast<internal::UInt64>(p));
+ }
+ }
+}
+
+// Used to print a non-container, non-pointer value when the user
+// doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+ false_type /* is not a pointer */,
+ const T& value, ::std::ostream* os) {
+ ::testing_internal::DefaultPrintNonContainerTo(value, os);
+}
+
+// Prints the given value using the << operator if it has one;
+// otherwise prints the bytes in it. This is what
+// UniversalPrinter<T>::Print() does when PrintTo() is not specialized
+// or overloaded for type T.
+//
+// A user can override this behavior for a class type Foo by defining
+// an overload of PrintTo() in the namespace where Foo is defined. We
+// give the user this option as sometimes defining a << operator for
+// Foo is not desirable (e.g. the coding style may prevent doing it,
+// or there is already a << operator but it doesn't do what the user
+// wants).
+template <typename T>
+void PrintTo(const T& value, ::std::ostream* os) {
+ // DefaultPrintTo() is overloaded. The type of its first two
+ // arguments determine which version will be picked. If T is an
+ // STL-style container, the version for container will be called; if
+ // T is a pointer, the pointer version will be called; otherwise the
+ // generic version will be called.
+ //
+  // Note that we check for container types here, before we check
+ // for protocol message types in our operator<<. The rationale is:
+ //
+ // For protocol messages, we want to give people a chance to
+ // override Google Mock's format by defining a PrintTo() or
+ // operator<<. For STL containers, other formats can be
+ // incompatible with Google Mock's format for the container
+ // elements; therefore we check for container types here to ensure
+ // that our format is used.
+ //
+ // The second argument of DefaultPrintTo() is needed to bypass a bug
+ // in Symbian's C++ compiler that prevents it from picking the right
+ // overload between:
+ //
+ // PrintTo(const T& x, ...);
+ // PrintTo(T* x, ...);
+ DefaultPrintTo(IsContainerTest<T>(0), is_pointer<T>(), value, os);
+}
+
+// The following list of PrintTo() overloads tells
+// UniversalPrinter<T>::Print() how to print standard types (built-in
+// types, strings, plain arrays, and pointers).
+
+// Overloads for various char types.
+GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);
+GTEST_API_ void PrintTo(signed char c, ::std::ostream* os);
+inline void PrintTo(char c, ::std::ostream* os) {
+ // When printing a plain char, we always treat it as unsigned. This
+ // way, the output won't be affected by whether the compiler thinks
+ // char is signed or not.
+ PrintTo(static_cast<unsigned char>(c), os);
+}
+
+// Overloads for other simple built-in types.
+inline void PrintTo(bool x, ::std::ostream* os) {
+ *os << (x ? "true" : "false");
+}
+
+// Overload for wchar_t type.
+// Prints a wchar_t as a symbol if it is printable, or as its internal
+// code otherwise; its decimal code is also printed (except for L'\0',
+// which is printed as "L'\\0'").  The decimal code is printed as a
+// signed integer when the compiler implements wchar_t as a signed
+// type, and as an unsigned integer when wchar_t is implemented as an
+// unsigned type.
+GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);
+
+// Overloads for C strings.
+GTEST_API_ void PrintTo(const char* s, ::std::ostream* os);
+inline void PrintTo(char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const char*>(s), os);
+}
+
+// signed/unsigned char is often used for representing binary data, so
+// we print pointers to it as void* to be safe.
+inline void PrintTo(const signed char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(signed char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(unsigned char* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const void*>(s), os);
+}
+
+// MSVC can be configured to define wchar_t as a typedef of unsigned
+// short. It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native
+// type. When wchar_t is a typedef, defining an overload for const
+// wchar_t* would cause unsigned short* to be printed as a wide string,
+// possibly causing invalid memory accesses.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Overloads for wide C strings
+GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os);
+inline void PrintTo(wchar_t* s, ::std::ostream* os) {
+ PrintTo(ImplicitCast_<const wchar_t*>(s), os);
+}
+#endif
+
+// Overload for C arrays. Multi-dimensional arrays are printed
+// properly.
+
+// Prints the given number of elements in an array, without printing
+// the curly braces.
+template <typename T>
+void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
+ UniversalPrint(a[0], os);
+ for (size_t i = 1; i != count; i++) {
+ *os << ", ";
+ UniversalPrint(a[i], os);
+ }
+}
+
+// Overloads for ::string and ::std::string.
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_API_ void PrintStringTo(const ::string& s, ::std::ostream* os);
+inline void PrintTo(const ::string& s, ::std::ostream* os) {
+ PrintStringTo(s, os);
+}
+#endif // GTEST_HAS_GLOBAL_STRING
+
+GTEST_API_ void PrintStringTo(const ::std::string& s, ::std::ostream* os);
+inline void PrintTo(const ::std::string& s, ::std::ostream* os) {
+ PrintStringTo(s, os);
+}
+
+// Overloads for ::wstring and ::std::wstring.
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::wstring& s, ::std::ostream* os);
+inline void PrintTo(const ::wstring& s, ::std::ostream* os) {
+ PrintWideStringTo(s, os);
+}
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::std::wstring& s, ::std::ostream* os);
+inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {
+ PrintWideStringTo(s, os);
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+// Helper function for printing a tuple. T must be instantiated with
+// a tuple type.
+template <typename T>
+void PrintTupleTo(const T& t, ::std::ostream* os);
+#endif // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+
+#if GTEST_HAS_TR1_TUPLE
+// Overload for ::std::tr1::tuple. Needed for printing function arguments,
+// which are packed as tuples.
+
+// Overloaded PrintTo() for tuples of various arities. We support
+// tuples of up to 10 fields.  The following implementation works
+// regardless of whether tr1::tuple is implemented using the
+// non-standard variadic template feature or not.
+
+inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1>
+void PrintTo(const ::std::tr1::tuple<T1>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2>
+void PrintTo(const ::std::tr1::tuple<T1, T2>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+void PrintTo(
+ const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& t,
+ ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+#endif // GTEST_HAS_TR1_TUPLE
+
+#if GTEST_HAS_STD_TUPLE_
+template <typename... Types>
+void PrintTo(const ::std::tuple<Types...>& t, ::std::ostream* os) {
+ PrintTupleTo(t, os);
+}
+#endif // GTEST_HAS_STD_TUPLE_
+
+// Overload for std::pair.
+template <typename T1, typename T2>
+void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
+ *os << '(';
+ // We cannot use UniversalPrint(value.first, os) here, as T1 may be
+ // a reference type. The same for printing value.second.
+ UniversalPrinter<T1>::Print(value.first, os);
+ *os << ", ";
+ UniversalPrinter<T2>::Print(value.second, os);
+ *os << ')';
+}
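+
+// For example (illustrative):
+//
+//   PrintTo(::std::make_pair(1, 2), &os);  // prints "(1, 2)"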
+
+// Implements printing a non-reference type T by letting the compiler
+// pick the right overload of PrintTo() for T.
+template <typename T>
+class UniversalPrinter {
+ public:
+ // MSVC warns about adding const to a function type, so we want to
+ // disable the warning.
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
+
+ // Note: we deliberately don't call this PrintTo(), as that name
+ // conflicts with ::testing::internal::PrintTo in the body of the
+ // function.
+ static void Print(const T& value, ::std::ostream* os) {
+ // By default, ::testing::internal::PrintTo() is used for printing
+ // the value.
+ //
+ // Thanks to Koenig look-up, if T is a class and has its own
+ // PrintTo() function defined in its namespace, that function will
+ // be visible here. Since it is more specific than the generic ones
+ // in ::testing::internal, it will be picked by the compiler in the
+ // following statement - exactly what we want.
+ PrintTo(value, os);
+ }
+
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+};
+
+// UniversalPrintArray(begin, len, os) prints an array of 'len'
+// elements, starting at address 'begin'.
+template <typename T>
+void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
+ if (len == 0) {
+ *os << "{}";
+ } else {
+ *os << "{ ";
+ const size_t kThreshold = 18;
+ const size_t kChunkSize = 8;
+ // If the array has more than kThreshold elements, we'll have to
+ // omit some details by printing only the first and the last
+ // kChunkSize elements.
+ // TODO(wan@google.com): let the user control the threshold using a flag.
+ if (len <= kThreshold) {
+ PrintRawArrayTo(begin, len, os);
+ } else {
+ PrintRawArrayTo(begin, kChunkSize, os);
+ *os << ", ..., ";
+ PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
+ }
+ *os << " }";
+ }
+}
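+
+// For example (illustrative), given an int array a of length 20 where
+// a[i] == i, UniversalPrintArray(a, 20, &os) prints
+//   { 0, 1, 2, 3, 4, 5, 6, 7, ..., 12, 13, 14, 15, 16, 17, 18, 19 }
+// i.e. only the first and last kChunkSize elements of a long array.
+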
+// This overload prints a (const) char array compactly.
+GTEST_API_ void UniversalPrintArray(
+ const char* begin, size_t len, ::std::ostream* os);
+
+// This overload prints a (const) wchar_t array compactly.
+GTEST_API_ void UniversalPrintArray(
+ const wchar_t* begin, size_t len, ::std::ostream* os);
+
+// Implements printing an array type T[N].
+template <typename T, size_t N>
+class UniversalPrinter<T[N]> {
+ public:
+ // Prints the given array, omitting some elements when there are too
+ // many.
+ static void Print(const T (&a)[N], ::std::ostream* os) {
+ UniversalPrintArray(a, N, os);
+ }
+};
+
+// Implements printing a reference type T&.
+template <typename T>
+class UniversalPrinter<T&> {
+ public:
+ // MSVC warns about adding const to a function type, so we want to
+ // disable the warning.
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
+
+ static void Print(const T& value, ::std::ostream* os) {
+ // Prints the address of the value. We use reinterpret_cast here
+ // as static_cast doesn't compile when T is a function type.
+ *os << "@" << reinterpret_cast<const void*>(&value) << " ";
+
+ // Then prints the value itself.
+ UniversalPrint(value, os);
+ }
+
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+};
+
+// Prints a value tersely: for a reference type, the referenced value
+// (but not the address) is printed; for a (const) char pointer, the
+// NUL-terminated string (but not the pointer) is printed.
+
+template <typename T>
+class UniversalTersePrinter {
+ public:
+ static void Print(const T& value, ::std::ostream* os) {
+ UniversalPrint(value, os);
+ }
+};
+template <typename T>
+class UniversalTersePrinter<T&> {
+ public:
+ static void Print(const T& value, ::std::ostream* os) {
+ UniversalPrint(value, os);
+ }
+};
+template <typename T, size_t N>
+class UniversalTersePrinter<T[N]> {
+ public:
+ static void Print(const T (&value)[N], ::std::ostream* os) {
+ UniversalPrinter<T[N]>::Print(value, os);
+ }
+};
+template <>
+class UniversalTersePrinter<const char*> {
+ public:
+ static void Print(const char* str, ::std::ostream* os) {
+ if (str == NULL) {
+ *os << "NULL";
+ } else {
+ UniversalPrint(string(str), os);
+ }
+ }
+};
+template <>
+class UniversalTersePrinter<char*> {
+ public:
+ static void Print(char* str, ::std::ostream* os) {
+ UniversalTersePrinter<const char*>::Print(str, os);
+ }
+};
+
+#if GTEST_HAS_STD_WSTRING
+template <>
+class UniversalTersePrinter<const wchar_t*> {
+ public:
+ static void Print(const wchar_t* str, ::std::ostream* os) {
+ if (str == NULL) {
+ *os << "NULL";
+ } else {
+ UniversalPrint(::std::wstring(str), os);
+ }
+ }
+};
+#endif
+
+template <>
+class UniversalTersePrinter<wchar_t*> {
+ public:
+ static void Print(wchar_t* str, ::std::ostream* os) {
+ UniversalTersePrinter<const wchar_t*>::Print(str, os);
+ }
+};
+
+template <typename T>
+void UniversalTersePrint(const T& value, ::std::ostream* os) {
+ UniversalTersePrinter<T>::Print(value, os);
+}
+
+// Prints a value using the type inferred by the compiler. The
+// difference between this and UniversalTersePrint() is that for a
+// (const) char pointer, this prints both the pointer and the
+// NUL-terminated string.
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os) {
+  // A workaround for the bug in VC++ 7.1 that prevents us from instantiating
+ // UniversalPrinter with T directly.
+ typedef T T1;
+ UniversalPrinter<T1>::Print(value, os);
+}
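+
+// For example (illustrative; the exact pointer format is
+// implementation-dependent):
+//
+//   const char* s = "hello";
+//   UniversalTersePrint(s, &os);  // prints "hello" (the string only)
+//   UniversalPrint(s, &os);       // prints the pointer value and "hello"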
+
+typedef ::std::vector<string> Strings;
+
+// TuplePolicy<TupleT> must provide:
+// - tuple_size
+// size of tuple TupleT.
+// - get<size_t I>(const TupleT& t)
+// static function extracting element I of tuple TupleT.
+// - tuple_element<size_t I>::type
+// type of element I of tuple TupleT.
+template <typename TupleT>
+struct TuplePolicy;
+
+#if GTEST_HAS_TR1_TUPLE
+template <typename TupleT>
+struct TuplePolicy {
+ typedef TupleT Tuple;
+ static const size_t tuple_size = ::std::tr1::tuple_size<Tuple>::value;
+
+ template <size_t I>
+ struct tuple_element : ::std::tr1::tuple_element<I, Tuple> {};
+
+ template <size_t I>
+ static typename AddReference<
+ const typename ::std::tr1::tuple_element<I, Tuple>::type>::type get(
+ const Tuple& tuple) {
+ return ::std::tr1::get<I>(tuple);
+ }
+};
+template <typename TupleT>
+const size_t TuplePolicy<TupleT>::tuple_size;
+#endif // GTEST_HAS_TR1_TUPLE
+
+#if GTEST_HAS_STD_TUPLE_
+template <typename... Types>
+struct TuplePolicy< ::std::tuple<Types...> > {
+ typedef ::std::tuple<Types...> Tuple;
+ static const size_t tuple_size = ::std::tuple_size<Tuple>::value;
+
+ template <size_t I>
+ struct tuple_element : ::std::tuple_element<I, Tuple> {};
+
+ template <size_t I>
+ static const typename ::std::tuple_element<I, Tuple>::type& get(
+ const Tuple& tuple) {
+ return ::std::get<I>(tuple);
+ }
+};
+template <typename... Types>
+const size_t TuplePolicy< ::std::tuple<Types...> >::tuple_size;
+#endif // GTEST_HAS_STD_TUPLE_
+
+#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+// This helper template allows PrintTo() for tuples and
+// UniversalTersePrintTupleFieldsToStrings() to be defined by
+// induction on the number of tuple fields. The idea is that
+// TuplePrefixPrinter<N>::PrintPrefixTo(t, os) prints the first N
+// fields in tuple t, and can be defined in terms of
+// TuplePrefixPrinter<N - 1>.
+//
+// The inductive case.
+template <size_t N>
+struct TuplePrefixPrinter {
+ // Prints the first N fields of a tuple.
+ template <typename Tuple>
+ static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
+ TuplePrefixPrinter<N - 1>::PrintPrefixTo(t, os);
+ GTEST_INTENTIONAL_CONST_COND_PUSH_()
+ if (N > 1) {
+ GTEST_INTENTIONAL_CONST_COND_POP_()
+ *os << ", ";
+ }
+ UniversalPrinter<
+ typename TuplePolicy<Tuple>::template tuple_element<N - 1>::type>
+ ::Print(TuplePolicy<Tuple>::template get<N - 1>(t), os);
+ }
+
+ // Tersely prints the first N fields of a tuple to a string vector,
+ // one element for each field.
+ template <typename Tuple>
+ static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
+ TuplePrefixPrinter<N - 1>::TersePrintPrefixToStrings(t, strings);
+ ::std::stringstream ss;
+ UniversalTersePrint(TuplePolicy<Tuple>::template get<N - 1>(t), &ss);
+ strings->push_back(ss.str());
+ }
+};
+
+// Base case.
+template <>
+struct TuplePrefixPrinter<0> {
+ template <typename Tuple>
+ static void PrintPrefixTo(const Tuple&, ::std::ostream*) {}
+
+ template <typename Tuple>
+ static void TersePrintPrefixToStrings(const Tuple&, Strings*) {}
+};
+
+// Helper function for printing a tuple.
+// Tuple must be either std::tr1::tuple or std::tuple type.
+template <typename Tuple>
+void PrintTupleTo(const Tuple& t, ::std::ostream* os) {
+ *os << "(";
+ TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::PrintPrefixTo(t, os);
+ *os << ")";
+}
+
+// Prints the fields of a tuple tersely to a string vector, one
+// element for each field. See the comment before
+// UniversalTersePrint() for how we define "tersely".
+template <typename Tuple>
+Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
+ Strings result;
+ TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::
+ TersePrintPrefixToStrings(value, &result);
+ return result;
+}
+#endif // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+
+} // namespace internal
+
+template <typename T>
+::std::string PrintToString(const T& value) {
+ ::std::stringstream ss;
+ internal::UniversalTersePrinter<T>::Print(value, &ss);
+ return ss.str();
+}
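+
+// For example (illustrative):
+//
+//   ::std::vector<int> v(3, 1);
+//   EXPECT_EQ("{ 1, 1, 1 }", ::testing::PrintToString(v));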
+
+} // namespace testing
+
+// Include any custom printer added by the local installation.
+// We must include this header at the end to make sure it can use the
+// declarations from this file.
+#include "gtest/internal/custom/gtest-printers.h"
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-spi.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-spi.h
new file mode 100644
index 000000000..f63fa9a1b
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-spi.h
@@ -0,0 +1,232 @@
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Utilities for testing Google Test itself and code that uses Google Test
+// (e.g. frameworks built on top of Google Test).
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+#include "gtest/gtest.h"
+
+namespace testing {
+
+// This helper class can be used to mock out Google Test failure reporting
+// so that we can test Google Test or code that builds on Google Test.
+//
+// An object of this class appends a TestPartResult object to the
+// TestPartResultArray object given in the constructor whenever a Google Test
+// failure is reported. It can either intercept only failures that are
+// generated in the same thread that created this object or it can intercept
+// all generated failures. The scope of this mock object can be controlled with
+// the second argument to the two-argument constructor.
+class GTEST_API_ ScopedFakeTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ // The two possible mocking modes of this object.
+ enum InterceptMode {
+ INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures.
+ INTERCEPT_ALL_THREADS // Intercepts all failures.
+ };
+
+ // The c'tor sets this object as the test part result reporter used
+ // by Google Test. The 'result' parameter specifies where to report the
+ // results. This reporter will only catch failures generated in the current
+ // thread. DEPRECATED
+ explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
+
+ // Same as above, but you can choose the interception scope of this object.
+ ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
+ TestPartResultArray* result);
+
+ // The d'tor restores the previous test part result reporter.
+ virtual ~ScopedFakeTestPartResultReporter();
+
+ // Appends the TestPartResult object to the TestPartResultArray
+ // received in the constructor.
+ //
+ // This method is from the TestPartResultReporterInterface
+ // interface.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+ private:
+ void Init();
+
+ const InterceptMode intercept_mode_;
+ TestPartResultReporterInterface* old_reporter_;
+ TestPartResultArray* const result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
+};
+
+namespace internal {
+
+// A helper class for implementing EXPECT_FATAL_FAILURE() and
+// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring. If that's not the case, a
+// non-fatal failure will be generated.
+class GTEST_API_ SingleFailureChecker {
+ public:
+ // The constructor remembers the arguments.
+ SingleFailureChecker(const TestPartResultArray* results,
+ TestPartResult::Type type,
+ const string& substr);
+ ~SingleFailureChecker();
+ private:
+ const TestPartResultArray* const results_;
+ const TestPartResult::Type type_;
+ const string substr_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+// A set of macros for testing Google Test assertions or code that's expected
+// to generate Google Test fatal failures. It verifies that the given
+// statement will cause exactly one fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_FATAL_FAILURE only
+// considers failures generated in the current thread, while
+// EXPECT_FATAL_FAILURE_ON_ALL_THREADS considers failures in all threads.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+// - 'statement' cannot reference local non-static variables or
+// non-static members of the current object.
+// - 'statement' cannot return a value.
+// - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. The AcceptsMacroThatExpandsToUnprotectedComma test in
+// gtest_unittest.cc will fail to compile if we do that.
+#define EXPECT_FATAL_FAILURE(statement, substr) \
+ do { \
+ class GTestExpectFatalFailureHelper {\
+ public:\
+ static void Execute() { statement; }\
+ };\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+ GTestExpectFatalFailureHelper::Execute();\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+ do { \
+ class GTestExpectFatalFailureHelper {\
+ public:\
+ static void Execute() { statement; }\
+ };\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ALL_THREADS, &gtest_failures);\
+ GTestExpectFatalFailureHelper::Execute();\
+ }\
+ } while (::testing::internal::AlwaysFalse())
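+
+// For example (Fails() and the test name are hypothetical):
+//
+//   void Fails() { FAIL() << "expected failure"; }
+//
+//   TEST(SpiTest, CatchesFatalFailure) {
+//     EXPECT_FATAL_FAILURE(Fails(), "expected failure");
+//   }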
+
+// A macro for testing Google Test assertions or code that's expected to
+// generate Google Test non-fatal failures. It asserts that the given
+// statement will cause exactly one non-fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only
+// considers failures generated in the current thread, while
+// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS considers failures in all threads.
+//
+// 'statement' is allowed to reference local variables and members of
+// the current object.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+// - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works. If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma. The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+// if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+ do {\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter:: \
+ INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
+ }\
+ } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+ do {\
+ ::testing::TestPartResultArray gtest_failures;\
+ ::testing::internal::SingleFailureChecker gtest_checker(\
+ &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+ (substr));\
+ {\
+ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+ ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
+ &gtest_failures);\
+ if (::testing::internal::AlwaysTrue()) { statement; }\
+ }\
+ } while (::testing::internal::AlwaysFalse())
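+
+// For example (the test name is hypothetical):
+//
+//   TEST(SpiTest, CatchesNonfatalFailure) {
+//     // Unlike EXPECT_FATAL_FAILURE(), the statement may reference
+//     // local variables and members of the current object.
+//     EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "my failure", "my failure");
+//   }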
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-test-part.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-test-part.h
new file mode 100644
index 000000000..77eb84483
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-test-part.h
@@ -0,0 +1,179 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+#include <iosfwd>
+#include <vector>
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-string.h"
+
+namespace testing {
+
+// A copyable object representing the result of a test part (i.e. an
+// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
+//
+// Don't inherit from TestPartResult as its destructor is not virtual.
+class GTEST_API_ TestPartResult {
+ public:
+ // The possible outcomes of a test part (i.e. an assertion or an
+ // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+ enum Type {
+ kSuccess, // Succeeded.
+ kNonFatalFailure, // Failed but the test can continue.
+ kFatalFailure // Failed and the test should be terminated.
+ };
+
+ // C'tor. TestPartResult does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestPartResult object.
+ TestPartResult(Type a_type,
+ const char* a_file_name,
+ int a_line_number,
+ const char* a_message)
+ : type_(a_type),
+ file_name_(a_file_name == NULL ? "" : a_file_name),
+ line_number_(a_line_number),
+ summary_(ExtractSummary(a_message)),
+ message_(a_message) {
+ }
+
+ // Gets the outcome of the test part.
+ Type type() const { return type_; }
+
+ // Gets the name of the source file where the test part took place, or
+ // NULL if it's unknown.
+ const char* file_name() const {
+ return file_name_.empty() ? NULL : file_name_.c_str();
+ }
+
+ // Gets the line in the source file where the test part took place,
+ // or -1 if it's unknown.
+ int line_number() const { return line_number_; }
+
+ // Gets the summary of the failure message.
+ const char* summary() const { return summary_.c_str(); }
+
+ // Gets the message associated with the test part.
+ const char* message() const { return message_.c_str(); }
+
+ // Returns true iff the test part passed.
+ bool passed() const { return type_ == kSuccess; }
+
+ // Returns true iff the test part failed.
+ bool failed() const { return type_ != kSuccess; }
+
+ // Returns true iff the test part non-fatally failed.
+ bool nonfatally_failed() const { return type_ == kNonFatalFailure; }
+
+ // Returns true iff the test part fatally failed.
+ bool fatally_failed() const { return type_ == kFatalFailure; }
+
+ private:
+ Type type_;
+
+ // Gets the summary of the failure message by omitting the stack
+ // trace in it.
+ static std::string ExtractSummary(const char* message);
+
+ // The name of the source file where the test part took place, or
+ // "" if the source file is unknown.
+ std::string file_name_;
+ // The line in the source file where the test part took place, or -1
+ // if the line number is unknown.
+ int line_number_;
+ std::string summary_; // The test failure summary.
+ std::string message_; // The test failure message.
+};
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result);
+
+// An array of TestPartResult objects.
+//
+// Don't inherit from TestPartResultArray as its destructor is not
+// virtual.
+class GTEST_API_ TestPartResultArray {
+ public:
+ TestPartResultArray() {}
+
+ // Appends the given TestPartResult to the array.
+ void Append(const TestPartResult& result);
+
+ // Returns the TestPartResult at the given index (0-based).
+ const TestPartResult& GetTestPartResult(int index) const;
+
+ // Returns the number of TestPartResult objects in the array.
+ int size() const;
+
+ private:
+ std::vector<TestPartResult> array_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray);
+};
+
+// This interface knows how to report a test part result.
+class TestPartResultReporterInterface {
+ public:
+ virtual ~TestPartResultReporterInterface() {}
+
+ virtual void ReportTestPartResult(const TestPartResult& result) = 0;
+};
+
+namespace internal {
+
+// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a
+// statement generates new fatal failures. To do so it registers itself as the
+// current test part result reporter. Besides checking if fatal failures were
+// reported, it only delegates the reporting to the former result reporter.
+// The original result reporter is restored in the destructor.
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+class GTEST_API_ HasNewFatalFailureHelper
+ : public TestPartResultReporterInterface {
+ public:
+ HasNewFatalFailureHelper();
+ virtual ~HasNewFatalFailureHelper();
+ virtual void ReportTestPartResult(const TestPartResult& result);
+ bool has_new_fatal_failure() const { return has_new_fatal_failure_; }
+ private:
+ bool has_new_fatal_failure_;
+ TestPartResultReporterInterface* original_reporter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper);
+};
+
+} // namespace internal
+
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-typed-test.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-typed-test.h
new file mode 100644
index 000000000..5f69d5678
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest-typed-test.h
@@ -0,0 +1,263 @@
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// This header implements typed tests and type-parameterized tests.
+
+// Typed (aka type-driven) tests repeat the same test for types in a
+// list. You must know which types you want to test with when writing
+// typed tests. Here's how you do it:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+ ...
+ typedef std::list<T> List;
+ static T shared_;
+ T value_;
+};
+
+// Next, associate a list of types with the test case, which will be
+// repeated for each type in the list. The typedef is necessary for
+// the macro to parse correctly.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+TYPED_TEST_CASE(FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// TYPED_TEST_CASE(FooTest, int);
+
+// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
+// tests for this test case as you want.
+TYPED_TEST(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+  // Since we are inside a derived class template, C++ requires us to
+ // visit the members of FooTest via 'this'.
+ TypeParam n = this->value_;
+
+ // To visit static members of the fixture, add the TestFixture::
+ // prefix.
+ n += TestFixture::shared_;
+
+ // To refer to typedefs in the fixture, add the "typename
+ // TestFixture::" prefix.
+ typename TestFixture::List values;
+ values.push_back(n);
+ ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+
+#endif // 0
+
+// Type-parameterized tests are abstract test patterns parameterized
+// by a type. Compared with typed tests, type-parameterized tests
+// allow you to define the test pattern without knowing what the type
+// parameters are. The defined pattern can be instantiated with
+// different types any number of times, in any number of translation
+// units.
+//
+// If you are designing an interface or concept, you can define a
+// suite of type-parameterized tests to verify properties that any
+// valid implementation of the interface/concept should have. Then,
+// each implementation can easily instantiate the test suite to verify
+// that it conforms to the requirements, without having to write
+// similar tests repeatedly. Here's an example:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ ...
+};
+
+// Next, declare that you will define a type-parameterized test case
+// (the _P suffix is for "parameterized" or "pattern", whichever you
+// prefer):
+TYPED_TEST_CASE_P(FooTest);
+
+// Then, use TYPED_TEST_P() to define as many type-parameterized tests
+// for this type-parameterized test case as you want.
+TYPED_TEST_P(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+ TypeParam n = 0;
+ ...
+}
+
+TYPED_TEST_P(FooTest, HasPropertyA) { ... }
+
+// Now the tricky part: you need to register all test patterns before
+// you can instantiate them. The first argument of the macro is the
+// test case name; the rest are the names of the tests in this test
+// case.
+REGISTER_TYPED_TEST_CASE_P(FooTest,
+ DoesBlah, HasPropertyA);
+
+// Finally, you are free to instantiate the pattern with the types you
+// want. If you put the above code in a header file, you can #include
+// it in multiple C++ source files and instantiate it multiple times.
+//
+// To distinguish different instances of the pattern, the first
+// argument to the INSTANTIATE_* macro is a prefix that will be added
+// to the actual test case name. Remember to pick unique prefixes for
+// different instances.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);
+
+#endif // 0
+
+#include "gtest/internal/gtest-port.h"
+#include "gtest/internal/gtest-type-util.h"
+
+// Implements typed tests.
+
+#if GTEST_HAS_TYPED_TEST
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the typedef for the type parameters of the
+// given test case.
+# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define TYPED_TEST_CASE(CaseName, Types) \
+ typedef ::testing::internal::TypeList< Types >::type \
+ GTEST_TYPE_PARAMS_(CaseName)
+
+# define TYPED_TEST(CaseName, TestName) \
+ template <typename gtest_TypeParam_> \
+ class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
+ : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ virtual void TestBody(); \
+ }; \
+ bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::internal::TypeParameterizedTest< \
+ CaseName, \
+ ::testing::internal::TemplateSel< \
+ GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
+ GTEST_TYPE_PARAMS_(CaseName)>::Register(\
+ "", ::testing::internal::CodeLocation(__FILE__, __LINE__), \
+ #CaseName, #TestName, 0); \
+ template <typename gtest_TypeParam_> \
+ void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
+
+#endif // GTEST_HAS_TYPED_TEST
+
+// Implements type-parameterized tests.
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the namespace name that the type-parameterized tests for
+// the given type-parameterized test case are defined in. The exact
+// name of the namespace is subject to change without notice.
+# define GTEST_CASE_NAMESPACE_(TestCaseName) \
+ gtest_case_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the variable used to remember the names of
+// the defined tests in the given test case.
+# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
+ gtest_typed_test_case_p_state_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
+//
+// Expands to the name of the variable used to remember the names of
+// the registered tests in the given test case.
+# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
+ gtest_registered_test_names_##TestCaseName##_
+
+// The variables defined in the type-parameterized test macros are
+// static as typically these macros are used in a .h file that can be
+// #included in multiple translation units linked together.
+# define TYPED_TEST_CASE_P(CaseName) \
+ static ::testing::internal::TypedTestCasePState \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
+
+# define TYPED_TEST_P(CaseName, TestName) \
+ namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+ template <typename gtest_TypeParam_> \
+ class TestName : public CaseName<gtest_TypeParam_> { \
+ private: \
+ typedef CaseName<gtest_TypeParam_> TestFixture; \
+ typedef gtest_TypeParam_ TypeParam; \
+ virtual void TestBody(); \
+ }; \
+ static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
+ __FILE__, __LINE__, #CaseName, #TestName); \
+ } \
+ template <typename gtest_TypeParam_> \
+ void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
+
+# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
+ namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+ typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
+ } \
+ static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
+ GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
+ __FILE__, __LINE__, #__VA_ARGS__)
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
+ bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \
+ ::testing::internal::TypeParameterizedTestCase<CaseName, \
+ GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
+ ::testing::internal::TypeList< Types >::type>::Register(\
+ #Prefix, \
+ ::testing::internal::CodeLocation(__FILE__, __LINE__), \
+ &GTEST_TYPED_TEST_CASE_P_STATE_(CaseName), \
+ #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
+
+#endif // GTEST_HAS_TYPED_TEST_P
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest.h
new file mode 100644
index 000000000..f846c5bd6
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest.h
@@ -0,0 +1,2236 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for Google Test. It should be
+// included by any test program that uses Google Test.
+//
+// IMPORTANT NOTE: Due to limitations of the C++ language, we have to
+// leave some internal implementation details in this header file.
+// They are clearly marked by comments like this:
+//
+// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <ostream>
+#include <vector>
+
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-string.h"
+#include "gtest/gtest-death-test.h"
+#include "gtest/gtest-message.h"
+#include "gtest/gtest-param-test.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest_prod.h"
+#include "gtest/gtest-test-part.h"
+#include "gtest/gtest-typed-test.h"
+
+// Depending on the platform, different string classes are available.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// You can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a type distinct from ::std::string, or
+// define it to 0 to indicate otherwise.
+//
+// If ::std::string and ::string are the same class on your platform
+// due to aliasing, you should define GTEST_HAS_GLOBAL_STRING to 0.
+//
+// If you do not define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
+
+namespace testing {
+
+// Declares the flags.
+
+// This flag temporarily enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag brings up the debugger on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" (enable colors), "no" (disable colors), and "auto" (the default),
+// which lets Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up the filter that selects, by name and using a glob
+// pattern, the tests to run. If the filter is not given, all tests are
+// executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag causes Google Test to list tests. None of the listed tests
+// are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1, the tests are repeated forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
+
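+// These flags may also be set programmatically before InitGoogleTest() is
+// called; a minimal sketch (the values shown are placeholders):
+//
+//   testing::GTEST_FLAG(filter) = "FooTest.*";
+//   testing::GTEST_FLAG(repeat) = 2;
+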
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
+
+namespace internal {
+
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
+class GTestFlagSaver;
+class StreamingListenerTest;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class UnitTestRecordPropertyTestHelper;
+class WindowsDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const std::string& message);
+
+} // namespace internal
+
+// The friend relationships of some of these classes are cyclic.
+// If we don't forward-declare them, the compiler might confuse the classes
+// in friendship clauses with same-named classes in the enclosing scope.
+class Test;
+class TestCase;
+class TestInfo;
+class UnitTest;
+
+// A class for indicating whether an assertion was successful. When
+// the assertion wasn't successful, the AssertionResult object
+// remembers a non-empty message that describes how it failed.
+//
+// To create an instance of this class, use one of the factory functions
+// (AssertionSuccess() and AssertionFailure()).
+//
+// This class is useful for two purposes:
+// 1. Defining predicate functions to be used with Boolean test assertions
+// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+// 2. Defining predicate-format functions to be
+// used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+//
+// For example, if you define IsEven predicate:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
+// will print the message
+//
+// Value of: IsEven(Fib(5))
+// Actual: false (5 is odd)
+// Expected: true
+//
+// instead of a more opaque
+//
+// Value of: IsEven(Fib(5))
+// Actual: false
+// Expected: true
+//
+// in case IsEven is a simple Boolean predicate.
+//
+// If you expect your predicate to be reused and want to support informative
+// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
+// about half as often as positive ones in our tests), supply messages for
+// both success and failure cases:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess() << n << " is even";
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
+//
+// Value of: IsEven(Fib(6))
+// Actual: true (8 is even)
+// Expected: false
+//
+// NB: Predicates that support negative Boolean assertions have reduced
+// performance in positive ones, so be careful not to use them in tests
+// that have lots (tens of thousands) of positive Boolean assertions.
+//
+// To use this class with EXPECT_PRED_FORMAT assertions such as:
+//
+// // Verifies that Foo() returns an even number.
+// EXPECT_PRED_FORMAT1(IsEven, Foo());
+//
+// you need to define:
+//
+// testing::AssertionResult IsEven(const char* expr, int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure()
+// << "Expected: " << expr << " is even\n Actual: it's " << n;
+// }
+//
+// If Foo() returns 5, you will see the following message:
+//
+// Expected: Foo() is even
+// Actual: it's 5
+//
+class GTEST_API_ AssertionResult {
+ public:
+ // Copy constructor.
+ // Used in EXPECT_TRUE/FALSE(assertion_result).
+ AssertionResult(const AssertionResult& other);
+
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */)
+
+ // Used in the EXPECT_TRUE/FALSE(bool_expression).
+ //
+ // T must be contextually convertible to bool.
+ //
+ // The second parameter prevents this overload from being considered if
+ // the argument is implicitly convertible to AssertionResult. In that case
+ // we want AssertionResult's copy constructor to be used.
+ template <typename T>
+ explicit AssertionResult(
+ const T& success,
+ typename internal::EnableIf<
+ !internal::ImplicitlyConvertible<T, AssertionResult>::value>::type*
+ /*enabler*/ = NULL)
+ : success_(success) {}
+
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+ // Assignment operator.
+ AssertionResult& operator=(AssertionResult other) {
+ swap(other);
+ return *this;
+ }
+
+ // Returns true iff the assertion succeeded.
+ operator bool() const { return success_; } // NOLINT
+
+ // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+ AssertionResult operator!() const;
+
+ // Returns the text streamed into this AssertionResult. Test assertions
+ // use it when they fail (i.e., the predicate's outcome doesn't match the
+ // assertion's expectation). When nothing has been streamed into the
+ // object, returns an empty string.
+ const char* message() const {
+ return message_.get() != NULL ? message_->c_str() : "";
+ }
+ // TODO(vladl@google.com): Remove this after making sure no clients use it.
+ // Deprecated; please use message() instead.
+ const char* failure_message() const { return message(); }
+
+ // Streams a custom failure message into this object.
+ template <typename T> AssertionResult& operator<<(const T& value) {
+ AppendMessage(Message() << value);
+ return *this;
+ }
+
+ // Allows streaming basic output manipulators such as endl or flush into
+ // this object.
+ AssertionResult& operator<<(
+ ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) {
+ AppendMessage(Message() << basic_manipulator);
+ return *this;
+ }
+
+ private:
+ // Appends the contents of message to message_.
+ void AppendMessage(const Message& a_message) {
+ if (message_.get() == NULL)
+ message_.reset(new ::std::string);
+ message_->append(a_message.GetString().c_str());
+ }
+
+ // Swap the contents of this AssertionResult with other.
+ void swap(AssertionResult& other);
+
+ // Stores result of the assertion predicate.
+ bool success_;
+ // Stores the message describing the condition in case the expectation
+ // construct is not satisfied with the predicate's outcome.
+ // Referenced via a pointer to avoid taking too much stack frame space
+ // with test assertions.
+ internal::scoped_ptr< ::std::string> message_;
+};
+
+// Makes a successful assertion result.
+GTEST_API_ AssertionResult AssertionSuccess();
+
+// Makes a failed assertion result.
+GTEST_API_ AssertionResult AssertionFailure();
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << msg.
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
+
+// The abstract class that all tests inherit from.
+//
+// In Google Test, a unit test program contains one or many TestCases, and
+// each TestCase contains one or many Tests.
+//
+// When you define a test using the TEST macro, you don't need to
+// explicitly derive from Test - the TEST macro automatically does
+// this for you.
+//
+// The only time you derive from Test is when defining a test fixture
+// to be used in a TEST_F. For example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// void SetUp() override { ... }
+// void TearDown() override { ... }
+// ...
+// };
+//
+// TEST_F(FooTest, Bar) { ... }
+// TEST_F(FooTest, Baz) { ... }
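+//
+// If the tests in a fixture need to share expensive set-up, the fixture can
+// additionally define the static SetUpTestCase()/TearDownTestCase() hooks
+// documented below; a minimal sketch (Resource and shared_resource_ are
+// placeholder names):
+//
+//   class FooTest : public testing::Test {
+//    protected:
+//     static void SetUpTestCase() { shared_resource_ = new Resource; }
+//     static void TearDownTestCase() {
+//       delete shared_resource_;
+//       shared_resource_ = NULL;
+//     }
+//     static Resource* shared_resource_;
+//   };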
+//
+// Test is not copyable.
+class GTEST_API_ Test {
+ public:
+ friend class TestInfo;
+
+ // Defines types for pointers to functions that set up and tear down
+ // a test case.
+ typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;
+ typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;
+
+ // The d'tor is virtual as we intend to inherit from Test.
+ virtual ~Test();
+
+ // Sets up the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::SetUpTestCase() before running the first
+ // test in test case Foo. Hence a sub-class can define its own
+ // SetUpTestCase() method to shadow the one defined in the super
+ // class.
+ static void SetUpTestCase() {}
+
+ // Tears down the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::TearDownTestCase() after running the last
+ // test in test case Foo. Hence a sub-class can define its own
+ // TearDownTestCase() method to shadow the one defined in the super
+ // class.
+ static void TearDownTestCase() {}
+
+ // Returns true iff the current test has a fatal failure.
+ static bool HasFatalFailure();
+
+ // Returns true iff the current test has a non-fatal failure.
+ static bool HasNonfatalFailure();
+
+ // Returns true iff the current test has a (either fatal or
+ // non-fatal) failure.
+ static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
+
+ // Logs a property for the current test, test case, or for the entire
+ // invocation of the test program when used outside of the context of a
+ // test case. Only the last value for a given key is remembered. These
+ // are public static so they can be called from utility functions that are
+ // not members of the test fixture. Calls to RecordProperty made during
+ // lifespan of the test (from the moment its constructor starts to the
+ // moment its destructor finishes) will be output in XML as attributes of
+ // the <testcase> element. Properties recorded from fixture's
+ // SetUpTestCase or TearDownTestCase are logged as attributes of the
+ // corresponding <testsuite> element. Calls to RecordProperty made in the
+ // global context (before or after invocation of RUN_ALL_TESTS and from
+ // SetUp/TearDown method of Environment objects registered with Google
+ // Test) will be output as attributes of the <testsuites> element.
+ static void RecordProperty(const std::string& key, const std::string& value);
+ static void RecordProperty(const std::string& key, int value);
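+  //
+  // A minimal sketch of use inside a test body (the key and value are
+  // placeholders):
+  //
+  //   TEST(FooTest, DoesBar) {
+  //     RecordProperty("MaximumWidgets", 12);  // Shows up in the XML report.
+  //   }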
+
+ protected:
+ // Creates a Test object.
+ Test();
+
+ // Sets up the test fixture.
+ virtual void SetUp();
+
+ // Tears down the test fixture.
+ virtual void TearDown();
+
+ private:
+ // Returns true iff the current test has the same fixture class as
+ // the first test in the current test case.
+ static bool HasSameFixtureClass();
+
+ // Runs the test after the test fixture has been set up.
+ //
+ // A sub-class must implement this to define the test logic.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
+ // Instead, use the TEST or TEST_F macro.
+ virtual void TestBody() = 0;
+
+ // Sets up, executes, and tears down the test.
+ void Run();
+
+ // Deletes self. We deliberately pick an unusual name for this
+ // internal method to avoid clashing with names used in user TESTs.
+ void DeleteSelf_() { delete this; }
+
+ const internal::scoped_ptr< GTEST_FLAG_SAVER_ > gtest_flag_saver_;
+
+ // Often a user misspells SetUp() as Setup() and spends a long time
+ // wondering why it is never called by Google Test. The declaration of
+ // the following method is solely for catching such an error at
+ // compile time:
+ //
+ // - The return type is deliberately chosen to be not void, so it
+ // will be a conflict if void Setup() is declared in the user's
+ // test fixture.
+ //
+ // - This method is private, so it will be another compiler error
+ // if the method is called from the user's test fixture.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION.
+ //
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+
+ // We disallow copying Tests.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
+};
+
+typedef internal::TimeInMillis TimeInMillis;
+
+// A copyable object representing a user specified test property which can be
+// output as a key/value string pair.
+//
+// Don't inherit from TestProperty as its destructor is not virtual.
+class TestProperty {
+ public:
+ // C'tor. TestProperty does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestProperty object.
+ TestProperty(const std::string& a_key, const std::string& a_value) :
+ key_(a_key), value_(a_value) {
+ }
+
+ // Gets the user supplied key.
+ const char* key() const {
+ return key_.c_str();
+ }
+
+ // Gets the user supplied value.
+ const char* value() const {
+ return value_.c_str();
+ }
+
+ // Sets a new value, overriding the one supplied in the constructor.
+ void SetValue(const std::string& new_value) {
+ value_ = new_value;
+ }
+
+ private:
+ // The key supplied by the user.
+ std::string key_;
+ // The value supplied by the user.
+ std::string value_;
+};
+
+// The result of a single Test. This includes a list of
+// TestPartResults, a list of TestProperties, a count of how many
+// death tests there are in the Test, and how much time it took to run
+// the Test.
+//
+// TestResult is not copyable.
+class GTEST_API_ TestResult {
+ public:
+ // Creates an empty TestResult.
+ TestResult();
+
+ // D'tor. Do not inherit from TestResult.
+ ~TestResult();
+
+ // Gets the number of all test parts. This is the sum of the number
+ // of successful test parts and the number of failed test parts.
+ int total_part_count() const;
+
+ // Returns the number of the test properties.
+ int test_property_count() const;
+
+ // Returns true iff the test passed (i.e. no test part failed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test failed.
+ bool Failed() const;
+
+ // Returns true iff the test fatally failed.
+ bool HasFatalFailure() const;
+
+ // Returns true iff the test has a non-fatal failure.
+ bool HasNonfatalFailure() const;
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test part result among all the results. i can range
+  // from 0 to total_part_count() - 1. If i is not in that range, aborts
+ // the program.
+ const TestPartResult& GetTestPartResult(int i) const;
+
+ // Returns the i-th test property. i can range from 0 to
+ // test_property_count() - 1. If i is not in that range, aborts the
+ // program.
+ const TestProperty& GetTestProperty(int i) const;
+
+ private:
+ friend class TestInfo;
+ friend class TestCase;
+ friend class UnitTest;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::ExecDeathTest;
+ friend class internal::TestResultAccessor;
+ friend class internal::UnitTestImpl;
+ friend class internal::WindowsDeathTest;
+
+ // Gets the vector of TestPartResults.
+ const std::vector<TestPartResult>& test_part_results() const {
+ return test_part_results_;
+ }
+
+ // Gets the vector of TestProperties.
+ const std::vector<TestProperty>& test_properties() const {
+ return test_properties_;
+ }
+
+ // Sets the elapsed time.
+ void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
+
+ // Adds a test property to the list. The property is validated and may add
+ // a non-fatal failure if invalid (e.g., if it conflicts with reserved
+ // key names). If a property is already recorded for the same key, the
+ // value will be updated, rather than storing multiple values for the same
+ // key. xml_element specifies the element for which the property is being
+ // recorded and is used for validation.
+ void RecordProperty(const std::string& xml_element,
+ const TestProperty& test_property);
+
+ // Adds a failure if the key is a reserved attribute of Google Test
+ // testcase tags. Returns true if the property is valid.
+ // TODO(russr): Validate attribute names are legal and human readable.
+ static bool ValidateTestProperty(const std::string& xml_element,
+ const TestProperty& test_property);
+
+ // Adds a test part result to the list.
+ void AddTestPartResult(const TestPartResult& test_part_result);
+
+ // Returns the death test count.
+ int death_test_count() const { return death_test_count_; }
+
+ // Increments the death test count, returning the new count.
+ int increment_death_test_count() { return ++death_test_count_; }
+
+ // Clears the test part results.
+ void ClearTestPartResults();
+
+ // Clears the object.
+ void Clear();
+
+ // Protects mutable state of the property vector and of owned
+ // properties, whose values may be updated.
+ internal::Mutex test_properites_mutex_;
+
+ // The vector of TestPartResults
+ std::vector<TestPartResult> test_part_results_;
+ // The vector of TestProperties
+ std::vector<TestProperty> test_properties_;
+ // Running count of death tests.
+ int death_test_count_;
+ // The elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestResult.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
+}; // class TestResult
+
+// A TestInfo object stores the following information about a test:
+//
+// Test case name
+// Test name
+// Whether the test should be run
+// A function pointer that creates the test object when invoked
+// Test result
+//
+// The constructor of TestInfo registers itself with the UnitTest
+// singleton such that the RUN_ALL_TESTS() macro knows which tests to
+// run.
+class GTEST_API_ TestInfo {
+ public:
+ // Destructs a TestInfo object. This function is not virtual, so
+ // don't inherit from TestInfo.
+ ~TestInfo();
+
+ // Returns the test case name.
+ const char* test_case_name() const { return test_case_name_.c_str(); }
+
+ // Returns the test name.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a typed
+ // or a type-parameterized test.
+ const char* type_param() const {
+ if (type_param_.get() != NULL)
+ return type_param_->c_str();
+ return NULL;
+ }
+
+ // Returns the text representation of the value parameter, or NULL if this
+ // is not a value-parameterized test.
+ const char* value_param() const {
+ if (value_param_.get() != NULL)
+ return value_param_->c_str();
+ return NULL;
+ }
+
+ // Returns the file name where this test is defined.
+ const char* file() const { return location_.file.c_str(); }
+
+ // Returns the line where this test is defined.
+ int line() const { return location_.line; }
+
+ // Returns true if this test should run, that is if the test is not
+ // disabled (or it is disabled but the also_run_disabled_tests flag has
+ // been specified) and its full name matches the user-specified filter.
+ //
+ // Google Test allows the user to filter the tests by their full names.
+ // The full name of a test Bar in test case Foo is defined as
+ // "Foo.Bar". Only the tests that match the filter will run.
+ //
+ // A filter is a colon-separated list of glob (not regex) patterns,
+ // optionally followed by a '-' and a colon-separated list of
+ // negative patterns (tests to exclude). A test is run if it
+ // matches one of the positive patterns and does not match any of
+ // the negative patterns.
+ //
+ // For example, *A*:Foo.* is a filter that matches any string that
+ // contains the character 'A' or starts with "Foo.".
+ bool should_run() const { return should_run_; }
+
+ // Returns true iff this test will appear in the XML report.
+ bool is_reportable() const {
+ // For now, the XML report includes all tests matching the filter.
+ // In the future, we may trim tests that are excluded because of
+ // sharding.
+ return matches_filter_;
+ }
+
+ // Returns the result of the test.
+ const TestResult* result() const { return &result_; }
+
+ private:
+#if GTEST_HAS_DEATH_TEST
+ friend class internal::DefaultDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+ friend class Test;
+ friend class TestCase;
+ friend class internal::UnitTestImpl;
+ friend class internal::StreamingListenerTest;
+ friend TestInfo* internal::MakeAndRegisterTestInfo(
+ const char* test_case_name,
+ const char* name,
+ const char* type_param,
+ const char* value_param,
+ internal::CodeLocation code_location,
+ internal::TypeId fixture_class_id,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc,
+ internal::TestFactoryBase* factory);
+
+ // Constructs a TestInfo object. The newly constructed instance assumes
+ // ownership of the factory object.
+ TestInfo(const std::string& test_case_name,
+ const std::string& name,
+ const char* a_type_param, // NULL if not a type-parameterized test
+ const char* a_value_param, // NULL if not a value-parameterized test
+ internal::CodeLocation a_code_location,
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory);
+
+ // Increments the number of death tests encountered in this test so
+ // far.
+ int increment_death_test_count() {
+ return result_.increment_death_test_count();
+ }
+
+ // Creates the test object, runs it, records its result, and then
+ // deletes it.
+ void Run();
+
+ static void ClearTestResult(TestInfo* test_info) {
+ test_info->result_.Clear();
+ }
+
+ // These fields are immutable properties of the test.
+ const std::string test_case_name_; // Test case name
+ const std::string name_; // Test name
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const internal::scoped_ptr<const ::std::string> type_param_;
+ // Text representation of the value parameter, or NULL if this is not a
+ // value-parameterized test.
+ const internal::scoped_ptr<const ::std::string> value_param_;
+ internal::CodeLocation location_;
+ const internal::TypeId fixture_class_id_; // ID of the test fixture class
+ bool should_run_; // True iff this test should run
+ bool is_disabled_; // True iff this test is disabled
+ bool matches_filter_; // True if this test matches the
+ // user-specified filter.
+ internal::TestFactoryBase* const factory_; // The factory that creates
+ // the test object
+
+ // This field is mutable and needs to be reset before running the
+ // test for the second time.
+ TestResult result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
+};
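+
+// A minimal sketch of inspecting the currently running test's TestInfo
+// (valid only while a test is running; see UnitTest below):
+//
+//   const testing::TestInfo* const info =
+//       testing::UnitTest::GetInstance()->current_test_info();
+//   printf("Running %s.%s\n", info->test_case_name(), info->name());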
+
+// A test case, which consists of a vector of TestInfos.
+//
+// TestCase is not copyable.
+class GTEST_API_ TestCase {
+ public:
+ // Creates a TestCase with the given name.
+ //
+ // TestCase does NOT have a default constructor. Always use this
+ // constructor to create a TestCase object.
+ //
+ // Arguments:
+ //
+ // name: name of the test case
+ // a_type_param: the name of the test's type parameter, or NULL if
+ // this is not a type-parameterized test.
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase(const char* name, const char* a_type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Destructor of TestCase.
+ virtual ~TestCase();
+
+ // Gets the name of the TestCase.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a
+ // type-parameterized test case.
+ const char* type_param() const {
+ if (type_param_.get() != NULL)
+ return type_param_->c_str();
+ return NULL;
+ }
+
+ // Returns true if any test in this test case should run.
+ bool should_run() const { return should_run_; }
+
+ // Gets the number of successful tests in this test case.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests in this test case.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests in this test case.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Get the number of tests in this test case that should run.
+ int test_to_run_count() const;
+
+ // Gets the number of all tests in this test case.
+ int total_test_count() const;
+
+ // Returns true iff the test case passed.
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test case failed.
+ bool Failed() const { return failed_test_count() > 0; }
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ const TestInfo* GetTestInfo(int i) const;
+
+ // Returns the TestResult that holds test properties recorded during
+ // execution of SetUpTestCase and TearDownTestCase.
+ const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; }
+
+ private:
+ friend class Test;
+ friend class internal::UnitTestImpl;
+
+ // Gets the (mutable) vector of TestInfos in this TestCase.
+ std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+ // Gets the (immutable) vector of TestInfos in this TestCase.
+ const std::vector<TestInfo*>& test_info_list() const {
+ return test_info_list_;
+ }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ TestInfo* GetMutableTestInfo(int i);
+
+ // Sets the should_run member.
+ void set_should_run(bool should) { should_run_ = should; }
+
+ // Adds a TestInfo to this test case. Will delete the TestInfo upon
+ // destruction of the TestCase object.
+ void AddTestInfo(TestInfo * test_info);
+
+ // Clears the results of all tests in this test case.
+ void ClearResult();
+
+ // Clears the results of all tests in the given test case.
+ static void ClearTestCaseResult(TestCase* test_case) {
+ test_case->ClearResult();
+ }
+
+ // Runs every test in this TestCase.
+ void Run();
+
+ // Runs SetUpTestCase() for this TestCase. This wrapper is needed
+ // for catching exceptions thrown from SetUpTestCase().
+ void RunSetUpTestCase() { (*set_up_tc_)(); }
+
+ // Runs TearDownTestCase() for this TestCase. This wrapper is
+ // needed for catching exceptions thrown from TearDownTestCase().
+ void RunTearDownTestCase() { (*tear_down_tc_)(); }
+
+ // Returns true iff test passed.
+ static bool TestPassed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Passed();
+ }
+
+ // Returns true iff test failed.
+ static bool TestFailed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Failed();
+ }
+
+ // Returns true iff the test is disabled and will be reported in the XML
+ // report.
+ static bool TestReportableDisabled(const TestInfo* test_info) {
+ return test_info->is_reportable() && test_info->is_disabled_;
+ }
+
+ // Returns true iff test is disabled.
+ static bool TestDisabled(const TestInfo* test_info) {
+ return test_info->is_disabled_;
+ }
+
+ // Returns true iff this test will appear in the XML report.
+ static bool TestReportable(const TestInfo* test_info) {
+ return test_info->is_reportable();
+ }
+
+ // Returns true if the given test should run.
+ static bool ShouldRunTest(const TestInfo* test_info) {
+ return test_info->should_run();
+ }
+
+ // Shuffles the tests in this test case.
+ void ShuffleTests(internal::Random* random);
+
+ // Restores the test order to before the first shuffle.
+ void UnshuffleTests();
+
+ // Name of the test case.
+ std::string name_;
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const internal::scoped_ptr<const ::std::string> type_param_;
+ // The vector of TestInfos in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestInfo*> test_info_list_;
+ // Provides a level of indirection for the test list to allow easy
+ // shuffling and restoring the test order. The i-th element in this
+ // vector is the index of the i-th test in the shuffled test list.
+ std::vector<int> test_indices_;
+ // Pointer to the function that sets up the test case.
+ Test::SetUpTestCaseFunc set_up_tc_;
+ // Pointer to the function that tears down the test case.
+ Test::TearDownTestCaseFunc tear_down_tc_;
+ // True iff any test in this test case should run.
+ bool should_run_;
+ // Elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+ // Holds test properties recorded during execution of SetUpTestCase and
+ // TearDownTestCase.
+ TestResult ad_hoc_test_result_;
+
+ // We disallow copying TestCases.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
+};
+
+// An Environment object is capable of setting up and tearing down an
+// environment. You should subclass this to define your own
+// environment(s).
+//
+// An Environment object does the set-up and tear-down in virtual
+// methods SetUp() and TearDown() instead of the constructor and the
+// destructor, as:
+//
+// 1. You cannot safely throw from a destructor. This is a problem
+// as in some cases Google Test is used where exceptions are enabled, and
+// we may want to implement ASSERT_* using exceptions where they are
+// available.
+// 2. You cannot use ASSERT_* directly in a constructor or
+// destructor.
+class Environment {
+ public:
+ // The d'tor is virtual as we need to subclass Environment.
+ virtual ~Environment() {}
+
+ // Override this to define how to set up the environment.
+ virtual void SetUp() {}
+
+ // Override this to define how to tear down the environment.
+ virtual void TearDown() {}
+ private:
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+};
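+
+// A minimal sketch of a custom environment (FooEnvironment is a placeholder
+// name; see AddGlobalTestEnvironment() below for registering it):
+//
+//   class FooEnvironment : public testing::Environment {
+//    public:
+//     virtual void SetUp() { /* set up shared resources */ }
+//     virtual void TearDown() { /* release shared resources */ }
+//   };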
+
+// The interface for tracing execution of tests. The methods are organized in
+// the order the corresponding events are fired.
+class TestEventListener {
+ public:
+ virtual ~TestEventListener() {}
+
+ // Fired before any test activity starts.
+ virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
+
+ // Fired before each iteration of tests starts. There may be more than
+ // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
+ // index, starting from 0.
+ virtual void OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired before environment set-up for each iteration of tests starts.
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment set-up for each iteration of tests ends.
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0;
+
+ // Fired before the test case starts.
+ virtual void OnTestCaseStart(const TestCase& test_case) = 0;
+
+ // Fired before the test starts.
+ virtual void OnTestStart(const TestInfo& test_info) = 0;
+
+ // Fired after a failed assertion or a SUCCEED() invocation.
+ virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0;
+
+ // Fired after the test ends.
+ virtual void OnTestEnd(const TestInfo& test_info) = 0;
+
+ // Fired after the test case ends.
+ virtual void OnTestCaseEnd(const TestCase& test_case) = 0;
+
+ // Fired before environment tear-down for each iteration of tests starts.
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0;
+
+ // Fired after environment tear-down for each iteration of tests ends.
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0;
+
+ // Fired after each iteration of tests finishes.
+ virtual void OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) = 0;
+
+ // Fired after all test activities have ended.
+ virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0;
+};
+
+// The convenience class for users who need to override just one or two
+// methods and are not concerned that a change to the signature of the
+// methods they override would go uncaught during the build. For
+// comments about each method please see the definition of TestEventListener
+// above.
+class EmptyTestEventListener : public TestEventListener {
+ public:
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& /*test_case*/) {}
+ virtual void OnTestStart(const TestInfo& /*test_info*/) {}
+ virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {}
+ virtual void OnTestEnd(const TestInfo& /*test_info*/) {}
+ virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {}
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+ int /*iteration*/) {}
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+};
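+
+// A minimal sketch of a listener built on EmptyTestEventListener
+// (PrintingListener is a placeholder name); it can be registered with
+// TestEventListeners::Append(), described below:
+//
+//   class PrintingListener : public EmptyTestEventListener {
+//    public:
+//     virtual void OnTestStart(const TestInfo& test_info) {
+//       printf("Starting %s.%s\n",
+//              test_info.test_case_name(), test_info.name());
+//     }
+//   };
+//
+//   testing::UnitTest::GetInstance()->listeners().Append(
+//       new PrintingListener);  // Google Test takes ownership.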
+
+// TestEventListeners lets users add listeners to track events in Google Test.
+class GTEST_API_ TestEventListeners {
+ public:
+ TestEventListeners();
+ ~TestEventListeners();
+
+ // Appends an event listener to the end of the list. Google Test assumes
+ // the ownership of the listener (i.e. it will delete the listener when
+ // the test program finishes).
+ void Append(TestEventListener* listener);
+
+ // Removes the given event listener from the list and returns it. It then
+ // becomes the caller's responsibility to delete the listener. Returns
+ // NULL if the listener is not found in the list.
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Returns the standard listener responsible for the default console
+ // output. Can be removed from the listeners list to shut down default
+ // console output. Note that removing this object from the listener list
+ // with Release transfers its ownership to the caller and makes this
+ // function return NULL the next time.
+ TestEventListener* default_result_printer() const {
+ return default_result_printer_;
+ }
+
+ // Returns the standard listener responsible for the default XML output
+ // controlled by the --gtest_output=xml flag. Can be removed from the
+ // listeners list by users who want to shut down the default XML output
+  // controlled by this flag and substitute it with a custom one. Note that
+ // removing this object from the listener list with Release transfers its
+ // ownership to the caller and makes this function return NULL the next
+ // time.
+ TestEventListener* default_xml_generator() const {
+ return default_xml_generator_;
+ }
+
+ private:
+ friend class TestCase;
+ friend class TestInfo;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::NoExecDeathTest;
+ friend class internal::TestEventListenersAccessor;
+ friend class internal::UnitTestImpl;
+
+ // Returns repeater that broadcasts the TestEventListener events to all
+ // subscribers.
+ TestEventListener* repeater();
+
+ // Sets the default_result_printer attribute to the provided listener.
+ // The listener is also added to the listener list and previous
+ // default_result_printer is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultResultPrinter(TestEventListener* listener);
+
+ // Sets the default_xml_generator attribute to the provided listener. The
+ // listener is also added to the listener list and previous
+ // default_xml_generator is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultXmlGenerator(TestEventListener* listener);
+
+ // Controls whether events will be forwarded by the repeater to the
+ // listeners in the list.
+ bool EventForwardingEnabled() const;
+ void SuppressEventForwarding();
+
+ // The actual list of listeners.
+ internal::TestEventRepeater* repeater_;
+ // Listener responsible for the standard result output.
+ TestEventListener* default_result_printer_;
+ // Listener responsible for the creation of the XML output file.
+ TestEventListener* default_xml_generator_;
+
+ // We disallow copying TestEventListeners.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
+
+// A UnitTest consists of a vector of TestCases.
+//
+// This is a singleton class. The only instance of UnitTest is
+// created when UnitTest::GetInstance() is first called. This
+// instance is never deleted.
+//
+// UnitTest is not copyable.
+//
+// This class is thread-safe as long as the methods are called
+// according to their specification.
+class GTEST_API_ UnitTest {
+ public:
+ // Gets the singleton UnitTest object. The first time this method
+ // is called, a UnitTest object is constructed and returned.
+ // Consecutive calls will return the same object.
+ static UnitTest* GetInstance();
+
+ // Runs all tests in this UnitTest object and prints the result.
+ // Returns 0 if successful, or 1 otherwise.
+ //
+ // This method can only be called from the main thread.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ int Run() GTEST_MUST_USE_RESULT_;
+
+ // Returns the working directory when the first TEST() or TEST_F()
+ // was executed. The UnitTest object owns the string.
+ const char* original_working_dir() const;
+
+ // Returns the TestCase object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestCase* current_test_case() const
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Returns the TestInfo object for the test that's currently running,
+ // or NULL if no test is running.
+ const TestInfo* current_test_info() const
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Returns the random seed used at the start of the current test run.
+ int random_seed() const;
+
+#if GTEST_HAS_PARAM_TEST
+ // Returns the ParameterizedTestCaseRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ internal::ParameterizedTestCaseRegistry& parameterized_test_registry()
+ GTEST_LOCK_EXCLUDED_(mutex_);
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Gets the number of successful test cases.
+ int successful_test_case_count() const;
+
+ // Gets the number of failed test cases.
+ int failed_test_case_count() const;
+
+ // Gets the number of all test cases.
+ int total_test_case_count() const;
+
+ // Gets the number of all test cases that contain at least one test
+ // that should run.
+ int test_case_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the time of the test program start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const;
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const;
+
+ // Returns true iff the unit test passed (i.e. all test cases passed).
+ bool Passed() const;
+
+ // Returns true iff the unit test failed (i.e. some test case failed
+ // or something outside of all tests failed).
+ bool Failed() const;
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const;
+
+ // Returns the TestResult containing information on test failures and
+ // properties logged outside of individual test cases.
+ const TestResult& ad_hoc_test_result() const;
+
+ // Returns the list of event listeners that can be used to track events
+ // inside Google Test.
+ TestEventListeners& listeners();
+
+ private:
+ // Registers and returns a global test environment. When a test
+ // program is run, all global test environments will be set-up in
+ // the order they were registered. After all tests in the program
+ // have finished, all global test environments will be torn-down in
+ // the *reverse* order they were registered.
+ //
+ // The UnitTest object takes ownership of the given environment.
+ //
+ // This method can only be called from the main thread.
+ Environment* AddEnvironment(Environment* env);
+
+ // Adds a TestPartResult to the current TestResult object. All
+ // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+ // eventually call this to report their results. The user code
+ // should use the assertion macros instead of calling this directly.
+ void AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const std::string& message,
+ const std::string& os_stack_trace)
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Adds a TestProperty to the current TestResult object when invoked from
+ // inside a test, to current TestCase's ad_hoc_test_result_ when invoked
+ // from SetUpTestCase or TearDownTestCase, or to the global property set
+ // when invoked elsewhere. If the result already contains a property with
+ // the same key, the value will be updated.
+ void RecordProperty(const std::string& key, const std::string& value);
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i);
+
+ // Accessors for the implementation object.
+ internal::UnitTestImpl* impl() { return impl_; }
+ const internal::UnitTestImpl* impl() const { return impl_; }
+
+  // These classes and functions are friends as they need to access private
+ // members of UnitTest.
+ friend class Test;
+ friend class internal::AssertHelper;
+ friend class internal::ScopedTrace;
+ friend class internal::StreamingListenerTest;
+ friend class internal::UnitTestRecordPropertyTestHelper;
+ friend Environment* AddGlobalTestEnvironment(Environment* env);
+ friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+ friend void internal::ReportFailureInUnknownLocation(
+ TestPartResult::Type result_type,
+ const std::string& message);
+
+ // Creates an empty UnitTest.
+ UnitTest();
+
+ // D'tor
+ virtual ~UnitTest();
+
+ // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+ // Google Test trace stack.
+ void PushGTestTrace(const internal::TraceInfo& trace)
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Pops a trace from the per-thread Google Test trace stack.
+ void PopGTestTrace()
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Protects mutable state in *impl_. This is mutable as some const
+ // methods need to lock it too.
+ mutable internal::Mutex mutex_;
+
+ // Opaque implementation object. This field is never changed once
+ // the object is constructed. We don't mark it as const here, as
+ // doing so will cause a warning in the constructor of UnitTest.
+ // Mutable state in *impl_ is protected by mutex_.
+ internal::UnitTestImpl* impl_;
+
+ // We disallow copying UnitTest.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
+};
+
+// A convenient wrapper for adding an environment for the test
+// program.
+//
+// You should call this before RUN_ALL_TESTS() is called, probably in
+// main(). If you use gtest_main, you need to call this before main()
+// starts for it to take effect. For example, you can define a global
+// variable like this:
+//
+// testing::Environment* const foo_env =
+// testing::AddGlobalTestEnvironment(new FooEnvironment);
+//
+// However, we strongly recommend you to write your own main() and
+// call AddGlobalTestEnvironment() there, as relying on initialization
+// of global variables makes the code harder to read and may cause
+// problems when you register multiple environments from different
+// translation units and the environments have dependencies among them
+// (remember that the compiler doesn't guarantee the order in which
+// global variables from different translation units are initialized).
+inline Environment* AddGlobalTestEnvironment(Environment* env) {
+ return UnitTest::GetInstance()->AddEnvironment(env);
+}
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function a second time has no user-visible effect.
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
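+
+// A typical main() is then a minimal sketch like:
+//
+//   int main(int argc, char** argv) {
+//     testing::InitGoogleTest(&argc, argv);
+//     return RUN_ALL_TESTS();
+//   }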
+
+namespace internal {
+
+// Separate the error generating code from the code path to reduce the stack
+// frame size of CmpHelperEQ. This helps reduce the overhead of some sanitizers
+// when calling EXPECT_* in a tight loop.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQFailure(const char* lhs_expression,
+ const char* rhs_expression,
+ const T1& lhs, const T2& rhs) {
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ FormatForComparisonFailureMessage(lhs, rhs),
+ FormatForComparisonFailureMessage(rhs, lhs),
+ false);
+}
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* lhs_expression,
+ const char* rhs_expression,
+ const T1& lhs,
+ const T2& rhs) {
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4389 /* signed/unsigned mismatch */)
+ if (lhs == rhs) {
+ return AssertionSuccess();
+ }
+GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+ return CmpHelperEQFailure(lhs_expression, rhs_expression, lhs, rhs);
+}
+
+// With this overloaded version, we allow anonymous enums to be used
+// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
+// can be implicitly cast to BiggestInt.
+GTEST_API_ AssertionResult CmpHelperEQ(const char* lhs_expression,
+ const char* rhs_expression,
+ BiggestInt lhs,
+ BiggestInt rhs);
+
+// The helper class for {ASSERT|EXPECT}_EQ. The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal. The following default implementation is
+// for lhs_is_null_literal being false.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+ // This templatized version is for the general case.
+ template <typename T1, typename T2>
+ static AssertionResult Compare(const char* lhs_expression,
+ const char* rhs_expression,
+ const T1& lhs,
+ const T2& rhs) {
+ return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs);
+ }
+
+ // With this overloaded version, we allow anonymous enums to be used
+ // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+ // enums can be implicitly cast to BiggestInt.
+ //
+ // Even though its body looks the same as the above version, we
+ // cannot merge the two, as it will make anonymous enums unhappy.
+ static AssertionResult Compare(const char* lhs_expression,
+ const char* rhs_expression,
+ BiggestInt lhs,
+ BiggestInt rhs) {
+ return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs);
+ }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal, like NULL, false, or 0.
+template <>
+class EqHelper<true> {
+ public:
+ // We define two overloaded versions of Compare(). The first
+ // version will be picked when the second argument to ASSERT_EQ() is
+ // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+ // EXPECT_EQ(false, a_bool).
+ template <typename T1, typename T2>
+ static AssertionResult Compare(
+ const char* lhs_expression,
+ const char* rhs_expression,
+ const T1& lhs,
+ const T2& rhs,
+ // The following line prevents this overload from being considered if T2
+ // is not a pointer type. We need this because ASSERT_EQ(NULL, my_ptr)
+ // expands to Compare("", "", NULL, my_ptr), which requires a conversion
+ // to match the Secret* in the other overload, which would otherwise make
+ // this template match better.
+ typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
+ return CmpHelperEQ(lhs_expression, rhs_expression, lhs, rhs);
+ }
+
+ // This version will be picked when the second argument to ASSERT_EQ() is a
+ // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+ template <typename T>
+ static AssertionResult Compare(
+ const char* lhs_expression,
+ const char* rhs_expression,
+ // We used to have a second template parameter instead of Secret*. That
+ // template parameter would deduce to 'long', making this a better match
+ // than the first overload even without the first overload's EnableIf.
+ // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
+ // non-pointer argument" (even a deduced integral argument), so the old
+ // implementation caused warnings in user code.
+ Secret* /* lhs (NULL) */,
+ T* rhs) {
+ // We already know that 'lhs' is a null pointer.
+ return CmpHelperEQ(lhs_expression, rhs_expression,
+ static_cast<T*>(NULL), rhs);
+ }
+};
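+
+// Illustrative sketch (not part of gtest itself) of how the overloads above
+// resolve; `my_ptr` and `AnIntFunction` are hypothetical names:
+//
+//   int* my_ptr = NULL;
+//   ASSERT_EQ(NULL, my_ptr);        // GTEST_IS_NULL_LITERAL_(NULL) is true,
+//                                   // so EqHelper<true>::Compare(Secret*, T*)
+//                                   // is selected and NULL becomes (T*)NULL.
+//   ASSERT_EQ(0, AnIntFunction());  // the rhs is not a pointer, so EnableIf
+//                                   // keeps the first overload viable and
+//                                   // the values compare as integers.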
+
+// Separate the error generating code from the code path to reduce the stack
+// frame size of CmpHelperOP. This helps reduce the overhead of some sanitizers
+// when calling EXPECT_OP in a tight loop.
+template <typename T1, typename T2>
+AssertionResult CmpHelperOpFailure(const char* expr1, const char* expr2,
+ const T1& val1, const T2& val2,
+ const char* op) {
+ return AssertionFailure()
+ << "Expected: (" << expr1 << ") " << op << " (" << expr2
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
+// of similar code.
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
+// with gcc 4.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ const T1& val1, const T2& val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ return CmpHelperOpFailure(expr1, expr2, val1, val2, #op);\
+ }\
+}\
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+ const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// Implements the helper function for {ASSERT|EXPECT}_NE
+GTEST_IMPL_CMP_HELPER_(NE, !=);
+// Implements the helper function for {ASSERT|EXPECT}_LE
+GTEST_IMPL_CMP_HELPER_(LE, <=);
+// Implements the helper function for {ASSERT|EXPECT}_LT
+GTEST_IMPL_CMP_HELPER_(LT, <);
+// Implements the helper function for {ASSERT|EXPECT}_GE
+GTEST_IMPL_CMP_HELPER_(GE, >=);
+// Implements the helper function for {ASSERT|EXPECT}_GT
+GTEST_IMPL_CMP_HELPER_(GT, >);
+
+#undef GTEST_IMPL_CMP_HELPER_
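+
+// For illustration only, GTEST_IMPL_CMP_HELPER_(NE, !=) above expands to
+// roughly the following (a template plus a BiggestInt overload declaration):
+//
+//   template <typename T1, typename T2>
+//   AssertionResult CmpHelperNE(const char* expr1, const char* expr2,
+//                               const T1& val1, const T2& val2) {
+//     if (val1 != val2) return AssertionSuccess();
+//     return CmpHelperOpFailure(expr1, expr2, val1, val2, "!=");
+//   }
+//   GTEST_API_ AssertionResult CmpHelperNE(const char* expr1,
+//                                          const char* expr2,
+//                                          BiggestInt val1, BiggestInt val2);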
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+
+// Helper function for *_STREQ on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
+
+// Helper function for *_STRNE on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
+
+} // namespace internal
+
+// IsSubstring() and IsNotSubstring() are intended to be used as the
+// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
+// themselves. They check whether needle is a substring of haystack
+// (NULL is considered a substring of itself only), and return an
+// appropriate error message when they fail.
+//
+// The {needle,haystack}_expr arguments are the stringified
+// expressions that generated the two real arguments.
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif // GTEST_HAS_STD_WSTRING
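+
+// Example usage (illustrative; `page` is a hypothetical std::string):
+//
+//   EXPECT_PRED_FORMAT2(::testing::IsSubstring, "<title>", page);
+//   ASSERT_PRED_FORMAT2(::testing::IsNotSubstring, "error", page)
+//       << "the rendered page must not report an error";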
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* lhs_expression,
+ const char* rhs_expression,
+ RawType lhs_value,
+ RawType rhs_value) {
+ const FloatingPoint<RawType> lhs(lhs_value), rhs(rhs_value);
+
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ ::std::stringstream lhs_ss;
+ lhs_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << lhs_value;
+
+ ::std::stringstream rhs_ss;
+ rhs_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << rhs_value;
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ StringStreamToString(&lhs_ss),
+ StringStreamToString(&rhs_ss),
+ false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+ // Constructor.
+ AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message);
+ ~AssertHelper();
+
+ // Message assignment is a semantic trick to enable assertion
+ // streaming; see the GTEST_MESSAGE_ macro below.
+ void operator=(const Message& message) const;
+
+ private:
+ // We put our data in a struct so that the size of the AssertHelper class can
+ // be as small as possible. This is important because gcc is incapable of
+ // re-using stack space even for temporary variables, so every EXPECT_EQ
+ // reserves stack space for another AssertHelper.
+ struct AssertHelperData {
+ AssertHelperData(TestPartResult::Type t,
+ const char* srcfile,
+ int line_num,
+ const char* msg)
+ : type(t), file(srcfile), line(line_num), message(msg) { }
+
+ TestPartResult::Type const type;
+ const char* const file;
+ int const line;
+ std::string const message;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+ };
+
+ AssertHelperData* const data_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
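+
+// Illustrative sketch (simplified, not the exact macro expansion) of the
+// assignment trick: a statement such as
+//
+//   ADD_FAILURE() << "value was " << x;
+//
+// expands to roughly
+//
+//   AssertHelper(TestPartResult::kNonFatalFailure, __FILE__, __LINE__,
+//                "Failed") = Message() << "value was " << x;
+//
+// The streamed Message is fully built first (operator<< binds tighter than
+// operator=), and operator= then reports it as the test part result.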
+
+} // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface. In most cases that just means inheriting
+// from ::testing::TestWithParam, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+// protected:
+// FooTest() {
+// // Can use GetParam() here.
+// }
+// virtual ~FooTest() {
+// // Can use GetParam() here.
+// }
+// virtual void SetUp() {
+// // Can use GetParam() here.
+// }
+//   virtual void TearDown() {
+// // Can use GetParam() here.
+// }
+// };
+// TEST_P(FooTest, DoesBar) {
+// // Can use GetParam() method here.
+// Foo foo;
+// ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+ typedef T ParamType;
+ virtual ~WithParamInterface() {}
+
+  // The current parameter value. It is also available in the test fixture's
+ // constructor. This member function is non-static, even though it only
+ // references static data, to reduce the opportunity for incorrect uses
+ // like writing 'WithParamInterface<bool>::GetParam()' for a test that
+ // uses a fixture whose parameter type is int.
+ const ParamType& GetParam() const {
+ GTEST_CHECK_(parameter_ != NULL)
+ << "GetParam() can only be called inside a value-parameterized test "
+ << "-- did you intend to write TEST_P instead of TEST_F?";
+ return *parameter_;
+ }
+
+ private:
+ // Sets parameter value. The caller is responsible for making sure the value
+ // remains alive and unchanged throughout the current test.
+ static void SetParam(const ParamType* parameter) {
+ parameter_ = parameter;
+ }
+
+ // Static value used for accessing parameter during a test lifetime.
+ static const ParamType* parameter_;
+
+ // TestClass must be a subclass of WithParamInterface<T> and Test.
+ template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = NULL;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam.
+
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+#endif // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied. If not,
+// it behaves like ADD_FAILURE. In particular:
+//
+// EXPECT_TRUE verifies that a Boolean condition is true.
+// EXPECT_FALSE verifies that a Boolean condition is false.
+//
+// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except
+// that they will also abort the current function on failure. People
+// usually want the fail-fast behavior of FAIL and ASSERT_*, but those
+// writing data-driven tests often find themselves using ADD_FAILURE
+// and EXPECT_* more.
+
+// Generates a nonfatal failure with a generic message.
+#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed")
+
+// Generates a nonfatal failure at the given source file location with
+// a generic message.
+#define ADD_FAILURE_AT(file, line) \
+ GTEST_MESSAGE_AT_(file, line, "Failed", \
+ ::testing::TestPartResult::kNonFatalFailure)
+
+// Generates a fatal failure with a generic message.
+#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed")
+
+// Define this macro to 1 to omit the definition of FAIL(), which is a
+// generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_FAIL
+# define FAIL() GTEST_FAIL()
+#endif
+
+// Generates a success with a generic message.
+#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded")
+
+// Define this macro to 1 to omit the definition of SUCCEED(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_SUCCEED
+# define SUCCEED() GTEST_SUCCEED()
+#endif
+
+// Macros for testing exceptions.
+//
+// * {ASSERT|EXPECT}_THROW(statement, expected_exception):
+// Tests that the statement throws the expected exception.
+// * {ASSERT|EXPECT}_NO_THROW(statement):
+// Tests that the statement doesn't throw any exception.
+// * {ASSERT|EXPECT}_ANY_THROW(statement):
+// Tests that the statement throws an exception.
+
+#define EXPECT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_THROW(statement, expected_exception) \
+ GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
+#define ASSERT_NO_THROW(statement) \
+ GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_)
+#define ASSERT_ANY_THROW(statement) \
+ GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_)
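+
+// Example usage (illustrative; the functions are hypothetical):
+//
+//   EXPECT_THROW(ParseConfig("bad input"), std::invalid_argument);
+//   EXPECT_NO_THROW(ParseConfig("good input"));
+//   ASSERT_ANY_THROW(ConnectTo(NULL));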
+
+// Boolean assertions. Condition can be either a Boolean expression or an
+// AssertionResult. For more information on how to use AssertionResult with
+// these macros see comments on that class.
+#define EXPECT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_((condition), #condition, false, true, \
+ GTEST_NONFATAL_FAILURE_)
+#define EXPECT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_NONFATAL_FAILURE_)
+#define ASSERT_TRUE(condition) \
+ GTEST_TEST_BOOLEAN_((condition), #condition, false, true, \
+ GTEST_FATAL_FAILURE_)
+#define ASSERT_FALSE(condition) \
+ GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \
+ GTEST_FATAL_FAILURE_)
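+
+// Example usage (illustrative; IsPositive is a hypothetical helper that
+// returns a testing::AssertionResult carrying its own message):
+//
+//   EXPECT_TRUE(list.empty()) << "expected no leftover items";
+//   EXPECT_FALSE(HasPendingWork());
+//   EXPECT_TRUE(IsPositive(balance));  // prints IsPositive's message on failure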
+
+// Includes the auto-generated header that implements a family of
+// generic predicate assertion macros.
+#include "gtest/gtest_pred_impl.h"
+
+// Macros for testing equalities and inequalities.
+//
+// * {ASSERT|EXPECT}_EQ(v1, v2): Tests that v1 == v2
+// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2
+// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2
+// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2
+// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2
+// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2
+//
+// When they are not, Google Test prints both the tested expressions and
+// their actual values. The values must be compatible built-in types,
+// or you will get a compiler error. By "compatible" we mean that the
+// values can be compared by the respective operator.
+//
+// Note:
+//
+// 1. It is possible to make a user-defined type work with
+// {ASSERT|EXPECT}_??(), but that requires overloading the
+// comparison operators and is thus discouraged by the Google C++
+//    Style Guide.  Therefore, you are advised to use the
+// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are
+// equal.
+//
+// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on
+// pointers (in particular, C strings). Therefore, if you use it
+// with two C strings, you are testing how their locations in memory
+// are related, not how their content is related. To compare two C
+// strings by content, use {ASSERT|EXPECT}_STR*().
+//
+// 3. {ASSERT|EXPECT}_EQ(v1, v2) is preferred to
+// {ASSERT|EXPECT}_TRUE(v1 == v2), as the former tells you
+// what the actual value is when it fails, and similarly for the
+// other comparisons.
+//
+// 4. Do not depend on the order in which {ASSERT|EXPECT}_??()
+// evaluate their arguments, which is undefined.
+//
+// 5. These macros evaluate their arguments exactly once.
+//
+// Examples:
+//
+// EXPECT_NE(5, Foo());
+// EXPECT_EQ(NULL, a_pointer);
+// ASSERT_LT(i, array_size);
+// ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal:: \
+ EqHelper<GTEST_IS_NULL_LITERAL_(val1)>::Compare, \
+ val1, val2)
+#define EXPECT_NE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define EXPECT_LE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+#define GTEST_ASSERT_EQ(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal:: \
+ EqHelper<GTEST_IS_NULL_LITERAL_(val1)>::Compare, \
+ val1, val2)
+#define GTEST_ASSERT_NE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define GTEST_ASSERT_LE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define GTEST_ASSERT_LT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define GTEST_ASSERT_GE(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define GTEST_ASSERT_GT(val1, val2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
+// ASSERT_XY(), which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_ASSERT_EQ
+# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_NE
+# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LE
+# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LT
+# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GE
+# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GT
+# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
+#endif
+
+// C-string Comparisons. All tests treat NULL and any non-NULL string
+// as different. Two NULLs are equal.
+//
+// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2
+// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2
+// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
+
+#define EXPECT_STREQ(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, s1, s2)
+#define EXPECT_STRNE(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(s1, s2) \
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2)
+#define EXPECT_STRCASENE(s1, s2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, s1, s2)
+#define ASSERT_STRNE(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(s1, s2) \
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, s1, s2)
+#define ASSERT_STRCASENE(s1, s2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
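+
+// Example usage (illustrative; GetGreeting is a hypothetical function
+// returning a C string):
+//
+//   EXPECT_STREQ("hello", GetGreeting());   // compares contents, not pointers
+//   EXPECT_STRCASEEQ("HELLO", "hello");     // passes: case is ignored
+//   ASSERT_STRNE(NULL, GetGreeting());      // NULL differs from any non-NULL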
+
+// Macros for comparing floating-point numbers.
+//
+// * {ASSERT|EXPECT}_FLOAT_EQ(val1, val2):
+// Tests that two float values are almost equal.
+// * {ASSERT|EXPECT}_DOUBLE_EQ(val1, val2):
+// Tests that two double values are almost equal.
+// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+// Tests that v1 and v2 are within the given distance to each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands. See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
+
+#define EXPECT_FLOAT_EQ(val1, val2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ val1, val2)
+
+#define EXPECT_DOUBLE_EQ(val1, val2)\
+ EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ val1, val2)
+
+#define ASSERT_FLOAT_EQ(val1, val2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+ val1, val2)
+
+#define ASSERT_DOUBLE_EQ(val1, val2)\
+ ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+ val1, val2)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+ EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+ ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+ val1, val2, abs_error)
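+
+// Example usage (illustrative; kPi is a hypothetical constant):
+//
+//   EXPECT_DOUBLE_EQ(0.3, 0.1 + 0.2);   // passes: the values differ by one
+//                                       // ULP, within the 4-ULP default bound
+//   ASSERT_NEAR(3.14159, kPi, 1e-4);    // explicit absolute error bound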
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+// EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success. These are only useful
+// on Windows and rely on Windows SDK macros and APIs to compile.
+//
+// * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+ EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+ ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+// * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+// EXPECT_NO_FATAL_FAILURE(Process());
+// ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+ GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope. The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
+#define SCOPED_TRACE(message) \
+ ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+ __FILE__, __LINE__, ::testing::Message() << (message))
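+
+// Example usage (illustrative; CheckQuery is a hypothetical helper that
+// contains its own assertions):
+//
+//   TEST(QueryTest, HandlesAllSizes) {
+//     for (int size = 0; size < 5; size++) {
+//       SCOPED_TRACE(size);  // failures below will name the iteration
+//       CheckQuery(size);
+//     }
+//   }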
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type. The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template. This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated. For example, given:
+//
+// template <typename T> class Foo {
+// public:
+// void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+// };
+//
+// the code:
+//
+// void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated. Instead, you need:
+//
+// void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+ (void)internal::StaticAssertTypeEqHelper<T1, T2>();
+ return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test". For
+// example, a test case for the Foo class can be named FooTest.
+//
+// Test code should appear between braces after an invocation of
+// this macro. Example:
+//
+// TEST(FooTest, InitializesCorrectly) {
+// Foo foo;
+// EXPECT_TRUE(foo.StatusIsOK());
+// }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test. This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X. The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code. GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_case_name, test_name)\
+ GTEST_TEST_(test_case_name, test_name, \
+ ::testing::Test, ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name. The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier. Test code should appear
+// between braces after using this macro. Example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// virtual void SetUp() { b_.AddElement(3); }
+//
+// Foo a_;
+// Foo b_;
+// };
+//
+// TEST_F(FooTest, InitializesCorrectly) {
+// EXPECT_TRUE(a_.StatusIsOK());
+// }
+//
+// TEST_F(FooTest, ReturnsElementCountCorrectly) {
+// EXPECT_EQ(0, a_.size());
+// EXPECT_EQ(1, b_.size());
+// }
+
+#define TEST_F(test_fixture, test_name)\
+ GTEST_TEST_(test_fixture, test_name, test_fixture, \
+ ::testing::internal::GetTypeId<test_fixture>())
+
+} // namespace testing
+
+// Use this function in main() to run all tests. It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
+//
+// This function was formerly a macro; thus, it is in the global
+// namespace and has an all-caps name.
+int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_;
+
+inline int RUN_ALL_TESTS() {
+ return ::testing::UnitTest::GetInstance()->Run();
+}
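+
+// Example main() (the canonical pattern from the gtest documentation):
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);
+//     return RUN_ALL_TESTS();
+//   }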
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest_pred_impl.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest_pred_impl.h
new file mode 100644
index 000000000..30ae712f5
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest_pred_impl.h
@@ -0,0 +1,358 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command
+// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND!
+//
+// Implements a family of generic predicate assertion macros.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
+
+// Makes sure this header is not included before gtest.h.
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
+#endif // GTEST_INCLUDE_GTEST_GTEST_H_
+
+// This header implements a family of generic predicate assertion
+// macros:
+//
+// ASSERT_PRED_FORMAT1(pred_format, v1)
+// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
+// ...
+//
+// where pred_format is a function or functor that takes n (in the
+// case of ASSERT_PRED_FORMATn) values and their source expression
+// text, and returns a testing::AssertionResult. See the definition
+// of ASSERT_EQ in gtest.h for an example.
+//
+// If you don't care about formatting, you can use the more
+// restrictive version:
+//
+// ASSERT_PRED1(pred, v1)
+// ASSERT_PRED2(pred, v1, v2)
+// ...
+//
+// where pred is an n-ary function or functor that returns bool,
+// and the values v1, v2, ..., must support the << operator for
+// streaming to std::ostream.
+//
+// We also define the EXPECT_* variations.
+//
+// For now we only support predicates whose arity is at most 5.
+// Please email googletestframework@googlegroups.com if you need
+// support for higher arities.
+
+// GTEST_ASSERT_ is the basic statement to which all of the assertions
+// in this file reduce. Don't use this in your code.
+
+#define GTEST_ASSERT_(expression, on_failure) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar = (expression)) \
+ ; \
+ else \
+ on_failure(gtest_ar.failure_message())
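+
+// For illustration (simplified): EXPECT_PRED_FORMAT1(Pred, v) reduces to
+//
+//   if (const ::testing::AssertionResult gtest_ar = Pred("v", v))
+//     ;  // success: nothing to do
+//   else
+//     GTEST_NONFATAL_FAILURE_(gtest_ar.failure_message());
+//
+// The if/else shape keeps the whole assertion a single statement (so it can
+// live in an unbraced if) while still letting a message be streamed onto the
+// failure branch.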
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+ const char* e1,
+ Pred pred,
+ const T1& v1) {
+ if (pred(v1)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, v1), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1. Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+ #v1, \
+ pred, \
+ v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+ GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+ GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
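+
+// Example usage (illustrative; IsEven is a hypothetical predicate):
+//
+//   bool IsEven(int n) { return (n % 2) == 0; }
+//
+//   EXPECT_PRED1(IsEven, 4);  // passes
+//   EXPECT_PRED1(IsEven, 5);  // fails: "IsEven(5) evaluates to false, where
+//                             //         5 evaluates to 5"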
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ Pred pred,
+ const T1& v1,
+ const T2& v2) {
+ if (pred(v1, v2)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2. Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+ #v1, \
+ #v2, \
+ pred, \
+ v1, \
+ v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+ GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+ GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
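+
+// Example usage (illustrative; MutuallyPrime and Gcd are hypothetical):
+//
+//   bool MutuallyPrime(int m, int n) { return Gcd(m, n) == 1; }
+//
+//   const int a = 3, b = 4;
+//   EXPECT_PRED2(MutuallyPrime, a, b);  // on failure, prints what a and b
+//                                       // evaluate to, one per line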
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3) {
+ if (pred(v1, v2, v3)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3. Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ pred, \
+ v1, \
+ v2, \
+ v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+ GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+ GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4) {
+ if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ", "
+ << e4 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3
+ << "\n" << e4 << " evaluates to " << v4;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4. Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+ GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+ GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+template <typename Pred,
+ typename T1,
+ typename T2,
+ typename T3,
+ typename T4,
+ typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+ const char* e1,
+ const char* e2,
+ const char* e3,
+ const char* e4,
+ const char* e5,
+ Pred pred,
+ const T1& v1,
+ const T2& v2,
+ const T3& v3,
+ const T4& v4,
+ const T5& v5) {
+ if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+ return AssertionFailure() << pred_text << "("
+ << e1 << ", "
+ << e2 << ", "
+ << e3 << ", "
+ << e4 << ", "
+ << e5 << ") evaluates to false, where"
+ << "\n" << e1 << " evaluates to " << v1
+ << "\n" << e2 << " evaluates to " << v2
+ << "\n" << e3 << " evaluates to " << v3
+ << "\n" << e4 << " evaluates to " << v4
+ << "\n" << e5 << " evaluates to " << v5;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
+ on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5. Don't use
+// this in your code.
+#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\
+ GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \
+ #v1, \
+ #v2, \
+ #v3, \
+ #v4, \
+ #v5, \
+ pred, \
+ v1, \
+ v2, \
+ v3, \
+ v4, \
+ v5), on_failure)
+
+// 5-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \
+ GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \
+ GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_)
+
+
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest_prod.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
new file mode 100644
index 000000000..da80ddc6c
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/gtest_prod.h
@@ -0,0 +1,58 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Testing Framework definitions useful in production code.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+// private:
+// void MyMethod();
+// FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+// // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+// // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest-port.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest-port.h
new file mode 100644
index 000000000..7e744bd3b
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest-port.h
@@ -0,0 +1,69 @@
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Injection point for custom user configurations.
+// The following macros can be defined:
+//
+// Flag related macros:
+// GTEST_FLAG(flag_name)
+// GTEST_USE_OWN_FLAGFILE_FLAG_ - Define to 0 when the system provides its
+// own flagfile flag parsing.
+// GTEST_DECLARE_bool_(name)
+// GTEST_DECLARE_int32_(name)
+// GTEST_DECLARE_string_(name)
+// GTEST_DEFINE_bool_(name, default_val, doc)
+// GTEST_DEFINE_int32_(name, default_val, doc)
+// GTEST_DEFINE_string_(name, default_val, doc)
+//
+// Test filtering:
+// GTEST_TEST_FILTER_ENV_VAR_ - The name of an environment variable that
+// will be used if --GTEST_FLAG(test_filter)
+// is not provided.
+//
+// Logging:
+// GTEST_LOG_(severity)
+// GTEST_CHECK_(condition)
+// Functions LogToStderr() and FlushInfoLog() have to be provided too.
+//
+// Threading:
+// GTEST_HAS_NOTIFICATION_ - Enabled if Notification is already provided.
+// GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ - Enabled if Mutex and ThreadLocal are
+// already provided.
+// Must also provide GTEST_DECLARE_STATIC_MUTEX_(mutex) and
+// GTEST_DEFINE_STATIC_MUTEX_(mutex)
+//
+// GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
+// GTEST_LOCK_EXCLUDED_(locks)
+//
+// ** Custom implementation starts here **
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
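+
+// Example (illustrative): a project could define, before this header is
+// reached, something like
+//
+//   #define GTEST_TEST_FILTER_ENV_VAR_ "MYPROJECT_TEST_FILTER"
+//
+// so that --gtest_filter falls back to that environment variable when the
+// flag is not given on the command line.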
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PORT_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest-printers.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest-printers.h
new file mode 100644
index 000000000..60c1ea050
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest-printers.h
@@ -0,0 +1,42 @@
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// This file provides an injection point for custom printers in a local
+// installation of gTest.
+// It will be included from gtest-printers.h and the overrides in this file
+// will be visible to everyone.
+// See documentation at gtest/gtest-printers.h for details on how to define a
+// custom printer.
+//
+// ** Custom implementation starts here **
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_PRINTERS_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest.h
new file mode 100644
index 000000000..c27412a89
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/custom/gtest.h
@@ -0,0 +1,41 @@
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Injection point for custom user configurations.
+// The following macros can be defined:
+//
+// GTEST_OS_STACK_TRACE_GETTER_ - The name of an implementation of
+// OsStackTraceGetterInterface.
+//
+// ** Custom implementation starts here **
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_CUSTOM_GTEST_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-death-test-internal.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-death-test-internal.h
new file mode 100644
index 000000000..2b3a78f5b
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-death-test-internal.h
@@ -0,0 +1,319 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines internal utilities needed for implementing
+// death tests. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+#include "gtest/internal/gtest-internal.h"
+
+#include <stdio.h>
+
+namespace testing {
+namespace internal {
+
+GTEST_DECLARE_string_(internal_run_death_test);
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kDeathTestStyleFlag[] = "death_test_style";
+const char kDeathTestUseFork[] = "death_test_use_fork";
+const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
+
+#if GTEST_HAS_DEATH_TEST
+
+// DeathTest is a class that hides much of the complexity of the
+// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method
+// returns a concrete class that depends on the prevailing death test
+// style, as defined by the --gtest_death_test_style and/or
+// --gtest_internal_run_death_test flags.
+
+// In describing the results of death tests, these terms are used with
+// the corresponding definitions:
+//
+// exit status: The integer exit information in the format specified
+// by wait(2)
+// exit code: The integer code passed to exit(3), _exit(2), or
+// returned from main()
+class GTEST_API_ DeathTest {
+ public:
+ // Create returns false if there was an error determining the
+ // appropriate action to take for the current death test; for example,
+ // if the gtest_death_test_style flag is set to an invalid value.
+ // The LastMessage method will return a more detailed message in that
+ // case. Otherwise, the DeathTest pointer pointed to by the "test"
+ // argument is set. If the death test should be skipped, the pointer
+ // is set to NULL; otherwise, it is set to the address of a new concrete
+ // DeathTest object that controls the execution of the current test.
+ static bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test);
+ DeathTest();
+ virtual ~DeathTest() { }
+
+ // A helper class that aborts a death test when it's deleted.
+ class ReturnSentinel {
+ public:
+ explicit ReturnSentinel(DeathTest* test) : test_(test) { }
+ ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); }
+ private:
+ DeathTest* const test_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel);
+ } GTEST_ATTRIBUTE_UNUSED_;
+
+ // An enumeration of possible roles that may be taken when a death
+ // test is encountered. EXECUTE means that the death test logic should
+ // be executed immediately. OVERSEE means that the program should prepare
+ // the appropriate environment for a child process to execute the death
+ // test, then wait for it to complete.
+ enum TestRole { OVERSEE_TEST, EXECUTE_TEST };
+
+ // An enumeration of the three reasons that a test might be aborted.
+ enum AbortReason {
+ TEST_ENCOUNTERED_RETURN_STATEMENT,
+ TEST_THREW_EXCEPTION,
+ TEST_DID_NOT_DIE
+ };
+
+ // Assumes one of the above roles.
+ virtual TestRole AssumeRole() = 0;
+
+ // Waits for the death test to finish and returns its status.
+ virtual int Wait() = 0;
+
+ // Returns true if the death test passed; that is, the test process
+ // exited during the test, its exit status matches a user-supplied
+ // predicate, and its stderr output matches a user-supplied regular
+ // expression.
+ // The user-supplied predicate may be a macro expression rather
+ // than a function pointer or functor, or else Wait and Passed could
+ // be combined.
+ virtual bool Passed(bool exit_status_ok) = 0;
+
+ // Signals that the death test did not die as expected.
+ virtual void Abort(AbortReason reason) = 0;
+
+  // Returns a human-readable message describing the outcome of the last
+  // death test.
+ static const char* LastMessage();
+
+ static void set_last_death_test_message(const std::string& message);
+
+ private:
+ // A string containing a description of the outcome of the last death test.
+ static std::string last_death_test_message_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
+};
+
+// Factory interface for death tests. May be mocked out for testing.
+class DeathTestFactory {
+ public:
+ virtual ~DeathTestFactory() { }
+ virtual bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test) = 0;
+};
+
+// A concrete DeathTestFactory implementation for normal use.
+class DefaultDeathTestFactory : public DeathTestFactory {
+ public:
+ virtual bool Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test);
+};
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
+
+// Traps C++ exceptions escaping statement and reports them as test
+// failures. Note that trapping SEH exceptions is not implemented here.
+# if GTEST_HAS_EXCEPTIONS
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } catch (const ::std::exception& gtest_exception) { \
+ fprintf(\
+ stderr, \
+ "\n%s: Caught std::exception-derived exception escaping the " \
+ "death test statement. Exception message: %s\n", \
+ ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \
+ gtest_exception.what()); \
+ fflush(stderr); \
+ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+ } catch (...) { \
+ death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+ }
+
+# else
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+
+# endif
+
+// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
+// ASSERT_EXIT*, and EXPECT_EXIT*.
+# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ const ::testing::internal::RE& gtest_regex = (regex); \
+ ::testing::internal::DeathTest* gtest_dt; \
+ if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
+ __FILE__, __LINE__, &gtest_dt)) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ if (gtest_dt != NULL) { \
+ ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
+ gtest_dt_ptr(gtest_dt); \
+ switch (gtest_dt->AssumeRole()) { \
+ case ::testing::internal::DeathTest::OVERSEE_TEST: \
+ if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+ } \
+ break; \
+ case ::testing::internal::DeathTest::EXECUTE_TEST: { \
+ ::testing::internal::DeathTest::ReturnSentinel \
+ gtest_sentinel(gtest_dt); \
+ GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
+ gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
+ break; \
+ } \
+ default: \
+ break; \
+ } \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
+ fail(::testing::internal::DeathTest::LastMessage())
+// The symbol "fail" here expands to something into which a message
+// can be streamed.
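+
+// For orientation, a sketch of how the public macros reach this one;
+// the actual definitions live in gtest-death-test.h and may differ in
+// detail:
+//
+//   #define EXPECT_DEATH(statement, regex) \
+//       GTEST_DEATH_TEST_(statement, \
+//           ::testing::internal::ExitedUnsuccessfully, regex, \
+//           GTEST_NONFATAL_FAILURE_)
+//
+// Here "fail" becomes GTEST_NONFATAL_FAILURE_, whose expansion ends in
+// a ::testing::Message(), so callers can stream extra diagnostics
+// (DoCrash() below stands in for arbitrary user code):
+//
+//   EXPECT_DEATH(DoCrash(), "assertion failed") << "extra context";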
+
+// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in
+// NDEBUG mode. In this case we need the statements to be executed, the regex is
+// ignored, and the macro must accept a streamed message even though the message
+// is never printed.
+# define GTEST_EXECUTE_STATEMENT_(statement, regex) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } else \
+ ::testing::Message()
+
+// A class representing the parsed contents of the
+// --gtest_internal_run_death_test flag, as it existed when
+// RUN_ALL_TESTS was called.
+class InternalRunDeathTestFlag {
+ public:
+ InternalRunDeathTestFlag(const std::string& a_file,
+ int a_line,
+ int an_index,
+ int a_write_fd)
+ : file_(a_file), line_(a_line), index_(an_index),
+ write_fd_(a_write_fd) {}
+
+ ~InternalRunDeathTestFlag() {
+ if (write_fd_ >= 0)
+ posix::Close(write_fd_);
+ }
+
+ const std::string& file() const { return file_; }
+ int line() const { return line_; }
+ int index() const { return index_; }
+ int write_fd() const { return write_fd_; }
+
+ private:
+ std::string file_;
+ int line_;
+ int index_;
+ int write_fd_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag);
+};
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag();
+
+#else // GTEST_HAS_DEATH_TEST
+
+// This macro is used for implementing macros such as
+// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where
+// death tests are not supported. Those macros must compile on such systems
+// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on
+// systems that support death tests. This allows one to write such a macro
+// on a system that does not support death tests and be sure that it will
+// compile on a death-test supporting system.
+//
+// Parameters:
+// statement - A statement that a macro such as EXPECT_DEATH would test
+// for program termination. This macro has to make sure this
+// statement is compiled but not executed, to ensure that
+// EXPECT_DEATH_IF_SUPPORTED compiles with a certain
+// parameter iff EXPECT_DEATH compiles with it.
+// regex - A regex that a macro such as EXPECT_DEATH would use to test
+// the output of statement. This parameter has to be
+// compiled but not evaluated by this macro, to ensure that
+// this macro only accepts expressions that a macro such as
+// EXPECT_DEATH would accept.
+// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED
+// and a return statement for ASSERT_DEATH_IF_SUPPORTED.
+// This ensures that ASSERT_DEATH_IF_SUPPORTED will not
+// compile inside functions where ASSERT_DEATH doesn't
+// compile.
+//
+// The branch that has an always false condition is used to ensure that
+// statement and regex are compiled (and thus syntactically correct) but
+// never executed. The unreachable code macro protects the terminator
+// statement from generating an 'unreachable code' warning in case
+// statement unconditionally returns or throws. The Message constructor at
+// the end allows the syntax of streaming additional messages into the
+ // macro, for compile-time compatibility with EXPECT_DEATH/ASSERT_DEATH.
+# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ GTEST_LOG_(WARNING) \
+ << "Death tests are not supported on this platform.\n" \
+ << "Statement '" #statement "' cannot be verified."; \
+ } else if (::testing::internal::AlwaysFalse()) { \
+ ::testing::internal::RE::PartialMatch(".*", (regex)); \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ terminator; \
+ } else \
+ ::testing::Message()
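+
+// A minimal usage sketch: the *_IF_SUPPORTED wrappers in
+// gtest-death-test.h are expected to invoke this macro roughly as
+//
+//   GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, )        // EXPECT form
+//   GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return)  // ASSERT form
+//
+// i.e. with an empty terminator for EXPECT_DEATH_IF_SUPPORTED and a
+// return statement for ASSERT_DEATH_IF_SUPPORTED.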
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-filepath.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-filepath.h
new file mode 100644
index 000000000..7a13b4b0d
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-filepath.h
@@ -0,0 +1,206 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: keith.ray@gmail.com (Keith Ray)
+//
+// Google Test filepath utilities
+//
+// This header file declares classes and functions used internally by
+// Google Test. They are subject to change without notice.
+//
+// This file is #included in <gtest/internal/gtest-internal.h>.
+// Do not include this header file separately!
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+
+#include "gtest/internal/gtest-string.h"
+
+namespace testing {
+namespace internal {
+
+// FilePath - a class for file and directory pathname manipulation which
+// handles platform-specific conventions (like the pathname separator).
+ // Used by helper functions that name files in a directory for XML output.
+// Except for Set methods, all methods are const or static, which provides an
+// "immutable value object" -- useful for peace of mind.
+// A FilePath with a value ending in a path separator ("like/this/") represents
+// a directory, otherwise it is assumed to represent a file. In either case,
+// it may or may not represent an actual file or directory in the file system.
+// Names are NOT checked for syntax correctness -- no checking for illegal
+// characters, malformed paths, etc.
+
+class GTEST_API_ FilePath {
+ public:
+ FilePath() : pathname_("") { }
+ FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
+
+ explicit FilePath(const std::string& pathname) : pathname_(pathname) {
+ Normalize();
+ }
+
+ FilePath& operator=(const FilePath& rhs) {
+ Set(rhs);
+ return *this;
+ }
+
+ void Set(const FilePath& rhs) {
+ pathname_ = rhs.pathname_;
+ }
+
+ const std::string& string() const { return pathname_; }
+ const char* c_str() const { return pathname_.c_str(); }
+
+ // Returns the current working directory, or "" if unsuccessful.
+ static FilePath GetCurrentDir();
+
+ // Given directory = "dir", base_name = "test", number = 0,
+ // extension = "xml", returns "dir/test.xml". If number is greater
+ // than zero (e.g., 12), returns "dir/test_12.xml".
+ // On Windows platform, uses \ as the separator rather than /.
+ static FilePath MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension);
+
+ // Given directory = "dir", relative_path = "test.xml",
+ // returns "dir/test.xml".
+ // On Windows, uses \ as the separator rather than /.
+ static FilePath ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path);
+
+ // Returns a pathname for a file that does not currently exist. The pathname
+ // will be directory/base_name.extension or
+ // directory/base_name_<number>.extension if directory/base_name.extension
+ // already exists. The number will be incremented until a pathname is found
+ // that does not already exist.
+ // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+ // There could be a race condition if two or more processes are calling this
+ // function at the same time -- they could both pick the same filename.
+ static FilePath GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension);
+
+ // Returns true iff the path is "".
+ bool IsEmpty() const { return pathname_.empty(); }
+
+ // If the input name has a trailing separator character, removes it and
+ // returns the name; otherwise returns the name unmodified.
+ // On Windows platform, uses \ as the separator, other platforms use /.
+ FilePath RemoveTrailingPathSeparator() const;
+
+ // Returns a copy of the FilePath with the directory part removed.
+ // Example: FilePath("path/to/file").RemoveDirectoryName() returns
+ // FilePath("file"). If there is no directory part ("just_a_file"), it returns
+ // the FilePath unmodified. If there is no file part ("just_a_dir/") it
+ // returns an empty FilePath ("").
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveDirectoryName() const;
+
+ // RemoveFileName returns the directory path with the filename removed.
+ // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+ // If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+ // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+ // not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+ // On Windows platform, '\' is the path separator, otherwise it is '/'.
+ FilePath RemoveFileName() const;
+
+ // Returns a copy of the FilePath with the case-insensitive extension removed.
+ // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+ // FilePath("dir/file"). If a case-insensitive extension is not
+ // found, returns a copy of the original FilePath.
+ FilePath RemoveExtension(const char* extension) const;
+
+ // Creates directories so that path exists. Returns true if successful or if
+ // the directories already exist; returns false if unable to create
+ // directories for any reason. Will also return false if the FilePath does
+ // not represent a directory (that is, it doesn't end with a path separator).
+ bool CreateDirectoriesRecursively() const;
+
+ // Create the directory so that path exists. Returns true if successful or
+ // if the directory already exists; returns false if unable to create the
+ // directory for any reason, including if the parent directory does not
+ // exist. Not named "CreateDirectory" because that's a macro on Windows.
+ bool CreateFolder() const;
+
+ // Returns true if FilePath describes something in the file-system,
+ // either a file, directory, or whatever, and that something exists.
+ bool FileOrDirectoryExists() const;
+
+ // Returns true if pathname describes a directory in the file-system
+ // that exists.
+ bool DirectoryExists() const;
+
+ // Returns true if FilePath ends with a path separator, which indicates that
+ // it is intended to represent a directory. Returns false otherwise.
+ // This does NOT check that a directory (or file) actually exists.
+ bool IsDirectory() const;
+
+ // Returns true if pathname describes a root directory. (Windows has one
+ // root directory per disk drive.)
+ bool IsRootDirectory() const;
+
+ // Returns true if pathname describes an absolute path.
+ bool IsAbsolutePath() const;
+
+ private:
+ // Replaces multiple consecutive separators with a single separator.
+ // For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+ // redundancies that might be in a pathname involving "." or "..".
+ //
+ // A pathname with multiple consecutive separators may occur either through
+ // user error or as a result of some scripts or APIs that generate a pathname
+ // with a trailing separator. On other platforms the same API or script
+ // may NOT generate a pathname with a trailing "/". Then elsewhere that
+ // pathname may have another "/" and pathname components added to it,
+ // without checking for the separator already being there.
+ // The script language and operating system may allow paths like "foo//bar"
+ // but some of the functions in FilePath will not handle that correctly. In
+ // particular, RemoveTrailingPathSeparator() only removes one separator, and
+ // it is called in CreateDirectoriesRecursively() assuming that it will change
+ // a pathname from directory syntax (trailing separator) to filename syntax.
+ //
+ // On Windows this method also replaces the alternate path separator '/' with
+ // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
+ // "bar\\foo".
+
+ void Normalize();
+
+ // Returns a pointer to the last occurrence of a valid path separator in
+ // the FilePath. On Windows, for example, both '/' and '\' are valid path
+ // separators. Returns NULL if no path separator was found.
+ const char* FindLastPathSeparator() const;
+
+ std::string pathname_;
+}; // class FilePath
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-internal.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-internal.h
new file mode 100644
index 000000000..ebd1cf615
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-internal.h
@@ -0,0 +1,1238 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+#include "gtest/internal/gtest-port.h"
+
+#if GTEST_OS_LINUX
+# include <stdlib.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+#endif // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#include <ctype.h>
+#include <float.h>
+#include <string.h>
+#include <iomanip>
+#include <limits>
+#include <map>
+#include <set>
+#include <string>
+#include <vector>
+
+#include "gtest/gtest-message.h"
+#include "gtest/internal/gtest-string.h"
+#include "gtest/internal/gtest-filepath.h"
+#include "gtest/internal/gtest-type-util.h"
+
+// Due to C++ preprocessor weirdness, we need double indirection to
+// concatenate two tokens when one of them is __LINE__. Writing
+//
+// foo ## __LINE__
+//
+// will result in the token foo__LINE__, instead of foo followed by
+// the current line number. For more details, see
+// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6
+#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
+#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
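+
+// For example, if the expansion happens on line 42 of a source file:
+//
+//   GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__)  // -> gtest_label_42
+//   gtest_label_ ## __LINE__                     // -> gtest_label___LINE__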
+
+class ProtocolMessage;
+namespace proto2 { class Message; }
+
+namespace testing {
+
+// Forward declarations.
+
+class AssertionResult; // Result of an assertion.
+class Message; // Represents a failure message.
+class Test; // Represents a test.
+class TestInfo; // Information about a test.
+class TestPartResult; // Result of a test part.
+class UnitTest; // A collection of test cases.
+
+template <typename T>
+::std::string PrintToString(const T& value);
+
+namespace internal {
+
+struct TraceInfo; // Information about a trace point.
+class ScopedTrace; // Implements scoped trace.
+class TestInfoImpl; // Opaque implementation of TestInfo
+class UnitTestImpl; // Opaque implementation of UnitTest
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+GTEST_API_ extern const char kStackTraceMarker[];
+
+// Two overloaded helpers for checking at compile time whether an
+// expression is a null pointer literal (i.e. NULL or any 0-valued
+// compile-time integral constant). Their return values have
+// different sizes, so we can use sizeof() to test which version is
+// picked by the compiler. These helpers have no implementations, as
+// we only need their signatures.
+//
+// Given IsNullLiteralHelper(x), the compiler will pick the first
+// version if x can be implicitly converted to Secret*, and pick the
+// second version otherwise. Since Secret is a secret and incomplete
+// type, the only expression a user can write that has type Secret* is
+// a null pointer literal. Therefore, we know that x is a null
+// pointer literal if and only if the first version is picked by the
+// compiler.
+char IsNullLiteralHelper(Secret* p);
+char (&IsNullLiteralHelper(...))[2]; // NOLINT
+
+// A compile-time bool constant that is true if and only if x is a
+// null pointer literal (i.e. NULL or any 0-valued compile-time
+// integral constant).
+#ifdef GTEST_ELLIPSIS_NEEDS_POD_
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_IS_NULL_LITERAL_(x) false
+#else
+# define GTEST_IS_NULL_LITERAL_(x) \
+ (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1)
+#endif // GTEST_ELLIPSIS_NEEDS_POD_
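+
+// For illustration, in the configuration where the helpers are usable:
+//
+//   GTEST_IS_NULL_LITERAL_(NULL)      // true: converts to Secret*
+//   GTEST_IS_NULL_LITERAL_(0)         // true: 0-valued integral constant
+//   GTEST_IS_NULL_LITERAL_(1)         // false: picks the ellipsis overload
+//   GTEST_IS_NULL_LITERAL_(some_ptr)  // false: a pointer variable (not a
+//                                     // literal) for any pointer some_ptr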
+
+// Appends the user-supplied message to the Google-Test-generated message.
+GTEST_API_ std::string AppendUserMessage(
+ const std::string& gtest_msg, const Message& user_msg);
+
+#if GTEST_HAS_EXCEPTIONS
+
+// This exception is thrown by (and only by) a failed Google Test
+// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions
+// are enabled). We derive it from std::runtime_error, which is for
+// errors presumably detectable only at run time. Since
+// std::runtime_error inherits from std::exception, many testing
+// frameworks know how to extract and print the message inside it.
+class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error {
+ public:
+ explicit GoogleTestFailureException(const TestPartResult& failure);
+};
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+// A helper class for creating scoped traces in user programs.
+class GTEST_API_ ScopedTrace {
+ public:
+ // The c'tor pushes the given source file location and message onto
+ // a trace stack maintained by Google Test.
+ ScopedTrace(const char* file, int line, const Message& message);
+
+ // The d'tor pops the info pushed by the c'tor.
+ //
+ // Note that the d'tor is not virtual in order to be efficient.
+ // Don't inherit from ScopedTrace!
+ ~ScopedTrace();
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace);
+} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its
+ // c'tor and d'tor. Therefore it doesn't
+ // need to be used otherwise.
+
+namespace edit_distance {
+// Returns the optimal edits to go from 'left' to 'right'.
+// All edits cost the same, with replace having lower priority than
+// add/remove.
+// Simple implementation of the Wagner–Fischer algorithm.
+// See http://en.wikipedia.org/wiki/Wagner-Fischer_algorithm
+enum EditType { kMatch, kAdd, kRemove, kReplace };
+GTEST_API_ std::vector<EditType> CalculateOptimalEdits(
+ const std::vector<size_t>& left, const std::vector<size_t>& right);
+
+// Same as above, but the input is represented as strings.
+GTEST_API_ std::vector<EditType> CalculateOptimalEdits(
+ const std::vector<std::string>& left,
+ const std::vector<std::string>& right);
+
+// Create a diff of the input strings in Unified diff format.
+GTEST_API_ std::string CreateUnifiedDiff(const std::vector<std::string>& left,
+ const std::vector<std::string>& right,
+ size_t context = 2);
+
+} // namespace edit_distance
+
+// Calculate the diff between 'left' and 'right' and return it in unified diff
+// format.
+// If not null, stores in 'total_line_count' the total number of lines found
+// in left + right.
+GTEST_API_ std::string DiffStrings(const std::string& left,
+ const std::string& right,
+ size_t* total_line_count);
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// expected_expression: "foo"
+// actual_expression: "bar"
+// expected_value: "5"
+// actual_value: "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will
+// be inserted into the message.
+GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
+ const char* actual_expression,
+ const std::string& expected_value,
+ const std::string& actual_value,
+ bool ignoring_case);
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+GTEST_API_ std::string GetBoolAssertionFailureMessage(
+ const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value);
+
+// This template class represents an IEEE floating-point number
+// (either single-precision or double-precision, depending on the
+// template parameters).
+//
+// The purpose of this class is to do more sophisticated number
+// comparison. (Due to round-off error, etc, it's very unlikely that
+// two floating-points will be equal exactly. Hence a naive
+// comparison by the == operation often doesn't work.)
+//
+// Format of IEEE floating-point:
+//
+// The most-significant bit being the leftmost, an IEEE
+// floating-point looks like
+//
+// sign_bit exponent_bits fraction_bits
+//
+// Here, sign_bit is a single bit that designates the sign of the
+// number.
+//
+// For float, there are 8 exponent bits and 23 fraction bits.
+//
+// For double, there are 11 exponent bits and 52 fraction bits.
+//
+// More details can be found at
+// http://en.wikipedia.org/wiki/IEEE_floating-point_standard.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+template <typename RawType>
+class FloatingPoint {
+ public:
+ // Defines the unsigned integer type that has the same size as the
+ // floating point number.
+ typedef typename TypeWithSize<sizeof(RawType)>::UInt Bits;
+
+ // Constants.
+
+ // # of bits in a number.
+ static const size_t kBitCount = 8*sizeof(RawType);
+
+ // # of fraction bits in a number.
+ static const size_t kFractionBitCount =
+ std::numeric_limits<RawType>::digits - 1;
+
+ // # of exponent bits in a number.
+ static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+ // The mask for the sign bit.
+ static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+ // The mask for the fraction bits.
+ static const Bits kFractionBitMask =
+ ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+ // The mask for the exponent bits.
+ static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
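+
+ // For float, for example, these constants work out to: kBitCount == 32,
+ // kFractionBitCount == 23, kExponentBitCount == 8,
+ // kSignBitMask == 0x80000000, kFractionBitMask == 0x007fffff, and
+ // kExponentBitMask == 0x7f800000.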
+
+ // How many ULP's (Units in the Last Place) we want to tolerate when
+ // comparing two numbers. The larger the value, the more error we
+ // allow. A 0 value means that two numbers must be exactly the same
+ // to be considered equal.
+ //
+ // The maximum error of a single floating-point operation is 0.5
+ // units in the last place. On Intel CPU's, all floating-point
+ // calculations are done with 80-bit precision, while double has 64
+ // bits. Therefore, 4 should be enough for ordinary use.
+ //
+ // See the following article for more details on ULP:
+ // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+ static const size_t kMaxUlps = 4;
+
+ // Constructs a FloatingPoint from a raw floating-point number.
+ //
+ // On an Intel CPU, passing a non-normalized NAN (Not a Number)
+ // around may change its bits, although the new value is guaranteed
+ // to be also a NAN. Therefore, don't expect this constructor to
+ // preserve the bits in x when x is a NAN.
+ explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
+
+ // Static methods
+
+ // Reinterprets a bit pattern as a floating-point number.
+ //
+ // This function is needed to test the AlmostEquals() method.
+ static RawType ReinterpretBits(const Bits bits) {
+ FloatingPoint fp(0);
+ fp.u_.bits_ = bits;
+ return fp.u_.value_;
+ }
+
+ // Returns the floating-point number that represents positive infinity.
+ static RawType Infinity() {
+ return ReinterpretBits(kExponentBitMask);
+ }
+
+ // Returns the maximum representable finite floating-point number.
+ static RawType Max();
+
+ // Non-static methods
+
+ // Returns the bits that represent this number.
+ const Bits &bits() const { return u_.bits_; }
+
+ // Returns the exponent bits of this number.
+ Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
+
+ // Returns the fraction bits of this number.
+ Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
+
+ // Returns the sign bit of this number.
+ Bits sign_bit() const { return kSignBitMask & u_.bits_; }
+
+ // Returns true iff this is NAN (not a number).
+ bool is_nan() const {
+ // It's a NAN if the exponent bits are all ones and the fraction
+ // bits are not entirely zeros.
+ return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+ }
+
+ // Returns true iff this number is at most kMaxUlps ULP's away from
+ // rhs. In particular, this function:
+ //
+ // - returns false if either number is (or both are) NAN.
+ // - treats really large numbers as almost equal to infinity.
+ // - thinks +0.0 and -0.0 are 0 ULP's apart.
+ bool AlmostEquals(const FloatingPoint& rhs) const {
+ // The IEEE standard says that any comparison operation involving
+ // a NAN must return false.
+ if (is_nan() || rhs.is_nan()) return false;
+
+ return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
+ <= kMaxUlps;
+ }
+
+ private:
+ // The data type used to store the actual floating-point number.
+ union FloatingPointUnion {
+ RawType value_; // The raw floating-point number.
+ Bits bits_; // The bits that represent the number.
+ };
+
+ // Converts an integer from the sign-and-magnitude representation to
+ // the biased representation. More precisely, let N be 2 to the
+ // power of (kBitCount - 1), an integer x is represented by the
+ // unsigned number x + N.
+ //
+ // For instance,
+ //
+ // -N + 1 (the most negative number representable using
+ // sign-and-magnitude) is represented by 1;
+ // 0 is represented by N; and
+ // N - 1 (the biggest number representable using
+ // sign-and-magnitude) is represented by 2N - 1.
+ //
+ // Read http://en.wikipedia.org/wiki/Signed_number_representations
+ // for more details on signed number representations.
+ static Bits SignAndMagnitudeToBiased(const Bits &sam) {
+ if (kSignBitMask & sam) {
+ // sam represents a negative number.
+ return ~sam + 1;
+ } else {
+ // sam represents a positive number.
+ return kSignBitMask | sam;
+ }
+ }
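+
+ // Worked example (using a hypothetical 8-bit Bits width for brevity):
+ // +0 (0x00) maps to 0x80, -0 (0x80) also maps to 0x80, and -1 (0x81)
+ // maps to 0x7f. Hence +0 and -0 are 0 apart, and -1 sits exactly one
+ // step below them in the biased ordering.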
+
+ // Given two numbers in the sign-and-magnitude representation,
+ // returns the distance between them as an unsigned number.
+ static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1,
+ const Bits &sam2) {
+ const Bits biased1 = SignAndMagnitudeToBiased(sam1);
+ const Bits biased2 = SignAndMagnitudeToBiased(sam2);
+ return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1);
+ }
+
+ FloatingPointUnion u_;
+};
+
+// We cannot use std::numeric_limits<T>::max() as it clashes with the max()
+// macro defined by <windows.h>.
+template <>
+inline float FloatingPoint<float>::Max() { return FLT_MAX; }
+template <>
+inline double FloatingPoint<double>::Max() { return DBL_MAX; }
+
+// Typedefs the instances of the FloatingPoint template class that we
+// care to use.
+typedef FloatingPoint<float> Float;
+typedef FloatingPoint<double> Double;
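+
+// A minimal usage sketch: 1.0f and (1.0f + FLT_EPSILON) are exactly one
+// ULP apart, which is within the kMaxUlps == 4 tolerance:
+//
+//   const Float lhs(1.0f);
+//   const Float rhs(1.0f + FLT_EPSILON);
+//   const bool close = lhs.AlmostEquals(rhs);  // true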
+
+// In order to catch the mistake of putting tests that use different
+// test fixture classes in the same test case, we need to assign
+// unique IDs to fixture classes and compare them. The TypeId type is
+// used to hold such IDs. The user should treat TypeId as an opaque
+// type: the only operation allowed on TypeId values is to compare
+// them for equality using the == operator.
+typedef const void* TypeId;
+
+template <typename T>
+class TypeIdHelper {
+ public:
+ // dummy_ must not have a const type. Otherwise an overly eager
+ // compiler (e.g. MSVC 7.1 & 8.0) may try to merge
+ // TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
+ static bool dummy_;
+};
+
+template <typename T>
+bool TypeIdHelper<T>::dummy_ = false;
+
+// GetTypeId<T>() returns the ID of type T. Different values will be
+// returned for different types. Calling the function twice with the
+// same type argument is guaranteed to return the same ID.
+template <typename T>
+TypeId GetTypeId() {
+ // The compiler is required to allocate a different
+ // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
+ // the template. Therefore, the address of dummy_ is guaranteed to
+ // be unique.
+ return &(TypeIdHelper<T>::dummy_);
+}
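+
+// For example:
+//
+//   GetTypeId<int>() == GetTypeId<int>()     // always true
+//   GetTypeId<int>() == GetTypeId<double>()  // always false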
+
+// Returns the type ID of ::testing::Test. Always call this instead
+// of GetTypeId< ::testing::Test>() to get the type ID of
+// ::testing::Test, as the latter may give the wrong result due to a
+// suspected linker bug when compiling Google Test as a Mac OS X
+// framework.
+GTEST_API_ TypeId GetTestTypeId();
+
+// Defines the abstract factory interface that creates instances
+// of a Test object.
+class TestFactoryBase {
+ public:
+ virtual ~TestFactoryBase() {}
+
+ // Creates a test instance to run. The instance is both created and destroyed
+ // within TestInfoImpl::Run()
+ virtual Test* CreateTest() = 0;
+
+ protected:
+ TestFactoryBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
+};
+
+// This class provides an implementation of the TestFactoryBase interface.
+// It is used in TEST and TEST_F macros.
+template <class TestClass>
+class TestFactoryImpl : public TestFactoryBase {
+ public:
+ virtual Test* CreateTest() { return new TestClass; }
+};
+
+#if GTEST_OS_WINDOWS
+
+// Predicate-formatters for implementing the HRESULT checking macros
+// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
+// We pass a long instead of HRESULT to avoid causing an
+// include dependency for the HRESULT type.
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+ long hr); // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+ long hr); // NOLINT
+
+#endif // GTEST_OS_WINDOWS
+
+// Types of SetUpTestCase() and TearDownTestCase() functions.
+typedef void (*SetUpTestCaseFunc)();
+typedef void (*TearDownTestCaseFunc)();
+
+struct CodeLocation {
+ CodeLocation(const string& a_file, int a_line) : file(a_file), line(a_line) {}
+
+ string file;
+ int line;
+};
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// name: name of the test
+// type_param: the name of the test's type parameter, or NULL if
+// this is not a typed or a type-parameterized test.
+// value_param: text representation of the test's value parameter,
+// or NULL if this is not a value-parameterized test.
+// code_location: code location where the test is defined
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
+ const char* test_case_name,
+ const char* name,
+ const char* type_param,
+ const char* value_param,
+ CodeLocation code_location,
+ TypeId fixture_class_id,
+ SetUpTestCaseFunc set_up_tc,
+ TearDownTestCaseFunc tear_down_tc,
+ TestFactoryBase* factory);
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr);
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// State of the definition of a type-parameterized test case.
+class GTEST_API_ TypedTestCasePState {
+ public:
+ TypedTestCasePState() : registered_(false) {}
+
+ // Adds the given test name to registered_tests_ and returns true
+ // if the test case hasn't been registered; otherwise aborts the
+ // program.
+ bool AddTestName(const char* file, int line, const char* case_name,
+ const char* test_name) {
+ if (registered_) {
+ fprintf(stderr, "%s Test %s must be defined before "
+ "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
+ FormatFileLocation(file, line).c_str(), test_name, case_name);
+ fflush(stderr);
+ posix::Abort();
+ }
+ registered_tests_.insert(
+ ::std::make_pair(test_name, CodeLocation(file, line)));
+ return true;
+ }
+
+ bool TestExists(const std::string& test_name) const {
+ return registered_tests_.count(test_name) > 0;
+ }
+
+ const CodeLocation& GetCodeLocation(const std::string& test_name) const {
+ RegisteredTestsMap::const_iterator it = registered_tests_.find(test_name);
+ GTEST_CHECK_(it != registered_tests_.end());
+ return it->second;
+ }
+
+ // Verifies that registered_tests match the test names in
+ // registered_tests_; returns registered_tests if successful, or
+ // aborts the program otherwise.
+ const char* VerifyRegisteredTestNames(
+ const char* file, int line, const char* registered_tests);
+
+ private:
+ typedef ::std::map<std::string, CodeLocation> RegisteredTestsMap;
+
+ bool registered_;
+ RegisteredTestsMap registered_tests_;
+};
+
+// Skips to the first non-space char after the first comma in 'str';
+// returns NULL if no comma is found in 'str'.
+inline const char* SkipComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ if (comma == NULL) {
+ return NULL;
+ }
+ while (IsSpace(*(++comma))) {}
+ return comma;
+}
+
+// Returns the prefix of 'str' before the first comma in it; returns
+// the entire string if it contains no comma.
+inline std::string GetPrefixUntilComma(const char* str) {
+ const char* comma = strchr(str, ',');
+ return comma == NULL ? str : std::string(str, comma);
+}
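+
+// For example (illustrative inputs):
+//
+//   SkipComma("Foo, Bar, Baz")            // -> "Bar, Baz"
+//   SkipComma("Foo")                      // -> NULL
+//   GetPrefixUntilComma("Foo, Bar, Baz")  // -> "Foo"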
+
+// Splits a given string on a given delimiter, populating a given
+// vector with the fields.
+void SplitString(const ::std::string& str, char delimiter,
+ ::std::vector< ::std::string>* dest);
+
+// TypeParameterizedTest<Fixture, TestSel, Types>::Register()
+// registers a list of type-parameterized tests with Google Test. The
+// return value is insignificant - we just need to return something
+// such that we can call this function in a namespace scope.
+//
+// Implementation note: The GTEST_TEMPLATE_ macro declares a template
+// template parameter. It's defined in gtest-type-util.h.
+template <GTEST_TEMPLATE_ Fixture, class TestSel, typename Types>
+class TypeParameterizedTest {
+ public:
+ // 'index' is the index of the test in the type list 'Types'
+ // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase,
+ // Types). Valid values for 'index' are [0, N - 1] where N is the
+ // length of Types.
+ static bool Register(const char* prefix,
+ CodeLocation code_location,
+ const char* case_name, const char* test_names,
+ int index) {
+ typedef typename Types::Head Type;
+ typedef Fixture<Type> FixtureClass;
+ typedef typename GTEST_BIND_(TestSel, Type) TestClass;
+
+ // First, registers the first type-parameterized test in the type
+ // list.
+ MakeAndRegisterTestInfo(
+ (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/"
+ + StreamableToString(index)).c_str(),
+ StripTrailingSpaces(GetPrefixUntilComma(test_names)).c_str(),
+ GetTypeName<Type>().c_str(),
+ NULL, // No value parameter.
+ code_location,
+ GetTypeId<FixtureClass>(),
+ TestClass::SetUpTestCase,
+ TestClass::TearDownTestCase,
+ new TestFactoryImpl<TestClass>);
+
+ // Next, recurses (at compile time) with the tail of the type list.
+ return TypeParameterizedTest<Fixture, TestSel, typename Types::Tail>
+ ::Register(prefix, code_location, case_name, test_names, index + 1);
+ }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, class TestSel>
+class TypeParameterizedTest<Fixture, TestSel, Types0> {
+ public:
+ static bool Register(const char* /*prefix*/, CodeLocation,
+ const char* /*case_name*/, const char* /*test_names*/,
+ int /*index*/) {
+ return true;
+ }
+};
+
+// TypeParameterizedTestCase<Fixture, Tests, Types>::Register()
+// registers *all combinations* of 'Tests' and 'Types' with Google
+// Test. The return value is insignificant - we just need to return
+// something such that we can call this function in a namespace scope.
+template <GTEST_TEMPLATE_ Fixture, typename Tests, typename Types>
+class TypeParameterizedTestCase {
+ public:
+ static bool Register(const char* prefix, CodeLocation code_location,
+ const TypedTestCasePState* state,
+ const char* case_name, const char* test_names) {
+ std::string test_name = StripTrailingSpaces(
+ GetPrefixUntilComma(test_names));
+ if (!state->TestExists(test_name)) {
+ fprintf(stderr, "Failed to get code location for test %s.%s at %s.",
+ case_name, test_name.c_str(),
+ FormatFileLocation(code_location.file.c_str(),
+ code_location.line).c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+ const CodeLocation& test_location = state->GetCodeLocation(test_name);
+
+ typedef typename Tests::Head Head;
+
+ // First, registers the first test in 'Tests' for each type in 'Types'.
+ TypeParameterizedTest<Fixture, Head, Types>::Register(
+ prefix, test_location, case_name, test_names, 0);
+
+ // Next, recurses (at compile time) with the tail of the test list.
+ return TypeParameterizedTestCase<Fixture, typename Tests::Tail, Types>
+ ::Register(prefix, code_location, state,
+ case_name, SkipComma(test_names));
+ }
+};
+
+// The base case for the compile time recursion.
+template <GTEST_TEMPLATE_ Fixture, typename Types>
+class TypeParameterizedTestCase<Fixture, Templates0, Types> {
+ public:
+ static bool Register(const char* /*prefix*/, CodeLocation,
+ const TypedTestCasePState* /*state*/,
+ const char* /*case_name*/, const char* /*test_names*/) {
+ return true;
+ }
+};
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+GTEST_API_ std::string GetCurrentOsStackTraceExceptTop(
+ UnitTest* unit_test, int skip_count);
+
+// Helpers for suppressing warnings on unreachable code or constant
+// condition.
+
+// Always returns true.
+GTEST_API_ bool AlwaysTrue();
+
+// Always returns false.
+inline bool AlwaysFalse() { return !AlwaysTrue(); }
+
+// Helper for suppressing false warning from Clang on a const char*
+// variable declared in a conditional expression always being NULL in
+// the else branch.
+struct GTEST_API_ ConstCharPtr {
+ ConstCharPtr(const char* str) : value(str) {}
+ operator bool() const { return true; }
+ const char* value;
+};
+
+// A simple Linear Congruential Generator for generating random
+// numbers with a uniform distribution. Unlike rand() and srand(), it
+// doesn't use global state (and therefore can't interfere with user
+// code). Unlike rand_r(), it's portable. An LCG isn't very random,
+// but it's good enough for our purposes.
+class GTEST_API_ Random {
+ public:
+ static const UInt32 kMaxRange = 1u << 31;
+
+ explicit Random(UInt32 seed) : state_(seed) {}
+
+ void Reseed(UInt32 seed) { state_ = seed; }
+
+ // Generates a random number from [0, range). Crashes if 'range' is
+ // 0 or greater than kMaxRange.
+ UInt32 Generate(UInt32 range);
+
+ private:
+ UInt32 state_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Random);
+};
+
+// Defining a variable of type CompileAssertTypesEqual<T1, T2> will cause a
+// compiler error iff T1 and T2 are different types.
+template <typename T1, typename T2>
+struct CompileAssertTypesEqual;
+
+template <typename T>
+struct CompileAssertTypesEqual<T, T> {
+};
+
+// Removes the reference from a type if it is a reference type,
+// otherwise leaves it unchanged. This is the same as
+// tr1::remove_reference, which is not widely available yet.
+template <typename T>
+struct RemoveReference { typedef T type; }; // NOLINT
+template <typename T>
+struct RemoveReference<T&> { typedef T type; }; // NOLINT
+
+// A handy wrapper around RemoveReference that works when the argument
+// T depends on template parameters.
+#define GTEST_REMOVE_REFERENCE_(T) \
+ typename ::testing::internal::RemoveReference<T>::type
+
+// Removes const from a type if it is a const type, otherwise leaves
+// it unchanged. This is the same as tr1::remove_const, which is not
+// widely available yet.
+template <typename T>
+struct RemoveConst { typedef T type; }; // NOLINT
+template <typename T>
+struct RemoveConst<const T> { typedef T type; }; // NOLINT
+
+// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above
+// definition to fail to remove the const in 'const int[3]' and 'const
+// char[3][4]'. The following specialization works around the bug.
+template <typename T, size_t N>
+struct RemoveConst<const T[N]> {
+ typedef typename RemoveConst<T>::type type[N];
+};
+
+#if defined(_MSC_VER) && _MSC_VER < 1400
+// This is the only specialization that allows VC++ 7.1 to remove const in
+// 'const int[3]' and 'const int[3][4]'. However, it causes trouble with GCC
+// and thus needs to be conditionally compiled.
+template <typename T, size_t N>
+struct RemoveConst<T[N]> {
+ typedef typename RemoveConst<T>::type type[N];
+};
+#endif
+
+// A handy wrapper around RemoveConst that works when the argument
+// T depends on template parameters.
+#define GTEST_REMOVE_CONST_(T) \
+ typename ::testing::internal::RemoveConst<T>::type
+
+// Turns const U&, U&, const U, and U all into U.
+#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \
+ GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T))
+
+// Adds reference to a type if it is not a reference type,
+// otherwise leaves it unchanged. This is the same as
+// tr1::add_reference, which is not widely available yet.
+template <typename T>
+struct AddReference { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddReference<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper around AddReference that works when the argument T
+// depends on template parameters.
+#define GTEST_ADD_REFERENCE_(T) \
+ typename ::testing::internal::AddReference<T>::type
+
+// Adds a reference to const on top of T as necessary. For example,
+// it transforms
+//
+// char ==> const char&
+// const char ==> const char&
+// char& ==> const char&
+// const char& ==> const char&
+//
+// The argument T must depend on some template parameters.
+#define GTEST_REFERENCE_TO_CONST_(T) \
+ GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T))
+
+// ImplicitlyConvertible<From, To>::value is a compile-time bool
+// constant that's true iff type From can be implicitly converted to
+// type To.
+template <typename From, typename To>
+class ImplicitlyConvertible {
+ private:
+ // We need the following helper functions only for their types.
+ // They have no implementations.
+
+ // MakeFrom() is an expression whose type is From. We cannot simply
+ // use From(), as the type From may not have a public default
+ // constructor.
+ static typename AddReference<From>::type MakeFrom();
+
+ // These two functions are overloaded. Given an expression
+ // Helper(x), the compiler will pick the first version if x can be
+ // implicitly converted to type To; otherwise it will pick the
+ // second version.
+ //
+ // The first version returns a value of size 1, and the second
+ // version returns a value of size 2. Therefore, by checking the
+ // size of Helper(x), which can be done at compile time, we can tell
+ // which version of Helper() is used, and hence whether x can be
+ // implicitly converted to type To.
+ static char Helper(To);
+ static char (&Helper(...))[2]; // NOLINT
+
+ // We have to put the 'public' section after the 'private' section,
+ // or MSVC refuses to compile the code.
+ public:
+#if defined(__BORLANDC__)
+ // C++Builder cannot use member overload resolution during template
+ // instantiation. The simplest workaround is to use its C++0x type traits
+ // functions (C++Builder 2009 and above only).
+ static const bool value = __is_convertible(From, To);
+#else
+ // MSVC warns about implicitly converting from double to int for
+ // possible loss of data, so we need to temporarily disable the
+ // warning.
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4244)
+ static const bool value =
+ sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1;
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+#endif // __BORLANDC__
+};
+template <typename From, typename To>
+const bool ImplicitlyConvertible<From, To>::value;
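+
+// For example:
+//
+//   ImplicitlyConvertible<int, double>::value  // true
+//   ImplicitlyConvertible<double, int>::value  // true (implicit narrowing)
+//   ImplicitlyConvertible<int*, void*>::value  // true
+//   ImplicitlyConvertible<void*, int*>::value  // false in C++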
+
+// IsAProtocolMessage<T>::value is a compile-time bool constant that's
+// true iff T is type ProtocolMessage, proto2::Message, or a subclass
+// of those.
+template <typename T>
+struct IsAProtocolMessage
+ : public bool_constant<
+ ImplicitlyConvertible<const T*, const ::ProtocolMessage*>::value ||
+ ImplicitlyConvertible<const T*, const ::proto2::Message*>::value> {
+};
+
+// When the compiler sees expression IsContainerTest<C>(0), if C is an
+// STL-style container class, the first overload of IsContainerTest
+// will be viable (since both C::iterator* and C::const_iterator* are
+// valid types and NULL can be implicitly converted to them). It will
+// be picked over the second overload as 'int' is a perfect match for
+// the type of argument 0. If C::iterator or C::const_iterator is not
+// a valid type, the first overload is not viable, and the second
+// overload will be picked. Therefore, we can determine whether C is
+// a container class by checking the type of IsContainerTest<C>(0).
+// The value of the expression is insignificant.
+//
+// Note that we look for both C::iterator and C::const_iterator. The
+// reason is that C++ injects the name of a class as a member of the
+// class itself (e.g. you can refer to class iterator as either
+// 'iterator' or 'iterator::iterator'). If we look for C::iterator
+// only, for example, we would mistakenly think that a class named
+// iterator is an STL container.
+//
+// Also note that the simpler approach of overloading
+// IsContainerTest(typename C::const_iterator*) and
+// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++.
+typedef int IsContainer;
+template <class C>
+IsContainer IsContainerTest(int /* dummy */,
+ typename C::iterator* /* it */ = NULL,
+ typename C::const_iterator* /* const_it */ = NULL) {
+ return 0;
+}
+
+typedef char IsNotContainer;
+template <class C>
+IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; }
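+
+// For example:
+//
+//   sizeof(IsContainerTest<std::vector<int> >(0)) == sizeof(IsContainer)
+//       // holds: vector defines both iterator and const_iterator
+//   sizeof(IsContainerTest<int>(0)) == sizeof(IsNotContainer)
+//       // holds: int has neither, so the long overload is picked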
+
+// EnableIf<condition>::type is void when 'Cond' is true, and
+// undefined when 'Cond' is false. To use SFINAE to make a function
+// overload only apply when a particular expression is true, add
+// "typename EnableIf<expression>::type* = 0" as the last parameter.
+template<bool> struct EnableIf;
+template<> struct EnableIf<true> { typedef void type; }; // NOLINT
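+
+// A sketch (the function name is illustrative only) of an overload that
+// only participates for 4-byte types:
+//
+//   template <typename T>
+//   void StoreWord(T value, typename EnableIf<sizeof(T) == 4>::type* = 0);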
+
+// Utilities for native arrays.
+
+// ArrayEq() compares two k-dimensional native arrays using the
+// elements' operator==, where k can be any integer >= 0. When k is
+// 0, ArrayEq() degenerates into comparing a single pair of values.
+
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs);
+
+// This generic version is used when k is 0.
+template <typename T, typename U>
+inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) {
+ return internal::ArrayEq(lhs, N, rhs);
+}
+
+// This helper reduces code bloat. If we instead put its logic inside
+// the previous ArrayEq() function, arrays with different sizes would
+// lead to different copies of the template code.
+template <typename T, typename U>
+bool ArrayEq(const T* lhs, size_t size, const U* rhs) {
+ for (size_t i = 0; i != size; i++) {
+ if (!internal::ArrayEq(lhs[i], rhs[i]))
+ return false;
+ }
+ return true;
+}
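+
+// For example:
+//
+//   const int a[] = { 1, 2, 3 };
+//   const long b[] = { 1, 2, 3 };
+//   ArrayEq(a, b);  // true: compares element-wise with operator==
+//
+//   const int m[2][2] = { { 1, 2 }, { 3, 4 } };
+//   ArrayEq(m, m);  // true: recurses into the inner arrays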
+
+// Finds the first element in the iterator range [begin, end) that
+// equals elem. Element may be a native array type itself.
+template <typename Iter, typename Element>
+Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) {
+ for (Iter it = begin; it != end; ++it) {
+ if (internal::ArrayEq(*it, elem))
+ return it;
+ }
+ return end;
+}
+
+// CopyArray() copies a k-dimensional native array using the elements'
+// operator=, where k can be any integer >= 0. When k is 0,
+// CopyArray() degenerates into copying a single value.
+
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to);
+
+// This generic version is used when k is 0.
+template <typename T, typename U>
+inline void CopyArray(const T& from, U* to) { *to = from; }
+
+// This overload is used when k >= 1.
+template <typename T, typename U, size_t N>
+inline void CopyArray(const T(&from)[N], U(*to)[N]) {
+ internal::CopyArray(from, N, *to);
+}
+
+// This helper reduces code bloat. If we instead put its logic inside
+// the previous CopyArray() function, arrays with different sizes
+// would lead to different copies of the template code.
+template <typename T, typename U>
+void CopyArray(const T* from, size_t size, U* to) {
+ for (size_t i = 0; i != size; i++) {
+ internal::CopyArray(from[i], to + i);
+ }
+}
+
+// The relation between a NativeArray object (see below) and the
+// native array it represents.
+// We use 2 different structs to allow non-copyable types to be used, as long
+// as RelationToSourceReference() is passed.
+struct RelationToSourceReference {};
+struct RelationToSourceCopy {};
+
+// Adapts a native array to a read-only STL-style container. Instead
+// of the complete STL container concept, this adaptor only implements
+// members useful for Google Mock's container matchers. New members
+// should be added as needed. To simplify the implementation, we only
+// support Element being a raw type (i.e. having no top-level const or
+// reference modifier). It's the client's responsibility to satisfy
+// this requirement. Element can be an array type itself (hence
+// multi-dimensional arrays are supported).
+template <typename Element>
+class NativeArray {
+ public:
+ // STL-style container typedefs.
+ typedef Element value_type;
+ typedef Element* iterator;
+ typedef const Element* const_iterator;
+
+ // Constructs from a native array. References the source.
+ NativeArray(const Element* array, size_t count, RelationToSourceReference) {
+ InitRef(array, count);
+ }
+
+ // Constructs from a native array. Copies the source.
+ NativeArray(const Element* array, size_t count, RelationToSourceCopy) {
+ InitCopy(array, count);
+ }
+
+ // Copy constructor.
+ NativeArray(const NativeArray& rhs) {
+ (this->*rhs.clone_)(rhs.array_, rhs.size_);
+ }
+
+ ~NativeArray() {
+ if (clone_ != &NativeArray::InitRef)
+ delete[] array_;
+ }
+
+ // STL-style container methods.
+ size_t size() const { return size_; }
+ const_iterator begin() const { return array_; }
+ const_iterator end() const { return array_ + size_; }
+ bool operator==(const NativeArray& rhs) const {
+ return size() == rhs.size() &&
+ ArrayEq(begin(), size(), rhs.begin());
+ }
+
+ private:
+ enum {
+ kCheckTypeIsNotConstOrAReference = StaticAssertTypeEqHelper<
+ Element, GTEST_REMOVE_REFERENCE_AND_CONST_(Element)>::value,
+ };
+
+ // Initializes this object with a copy of the input.
+ void InitCopy(const Element* array, size_t a_size) {
+ Element* const copy = new Element[a_size];
+ CopyArray(array, a_size, copy);
+ array_ = copy;
+ size_ = a_size;
+ clone_ = &NativeArray::InitCopy;
+ }
+
+ // Initializes this object with a reference of the input.
+ void InitRef(const Element* array, size_t a_size) {
+ array_ = array;
+ size_ = a_size;
+ clone_ = &NativeArray::InitRef;
+ }
+
+ const Element* array_;
+ size_t size_;
+ void (NativeArray::*clone_)(const Element*, size_t);
+
+ GTEST_DISALLOW_ASSIGN_(NativeArray);
+};
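+
+// Illustrative sketch (not part of the original header): NativeArray exposes
+// just enough of the STL container concept for the container matchers, with
+// the tag argument selecting reference vs. copy semantics.
+//
+//   const int values[] = { 1, 2, 3 };
+//   ::testing::internal::NativeArray<int> view(
+//       values, 3, ::testing::internal::RelationToSourceReference());
+//   // view.size() == 3, *view.begin() == 1; `view` references `values`
+//   // rather than owning a copy, so `values` must outlive it.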
+
+} // namespace internal
+} // namespace testing
+
+#define GTEST_MESSAGE_AT_(file, line, message, result_type) \
+ ::testing::internal::AssertHelper(result_type, file, line, message) \
+ = ::testing::Message()
+
+#define GTEST_MESSAGE_(message, result_type) \
+ GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type)
+
+#define GTEST_FATAL_FAILURE_(message) \
+ return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure)
+
+#define GTEST_NONFATAL_FAILURE_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure)
+
+#define GTEST_SUCCESS_(message) \
+ GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess)
+
+// Suppresses MSVC warning 4702 (unreachable code) for the code following
+// statement if it returns or throws (or doesn't return or throw in some
+// situations).
+#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \
+ if (::testing::internal::AlwaysTrue()) { statement; }
+
+#define GTEST_TEST_THROW_(statement, expected_exception, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::ConstCharPtr gtest_msg = "") { \
+ bool gtest_caught_expected = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (expected_exception const&) { \
+ gtest_caught_expected = true; \
+ } \
+ catch (...) { \
+ gtest_msg.value = \
+ "Expected: " #statement " throws an exception of type " \
+ #expected_exception ".\n Actual: it throws a different type."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ if (!gtest_caught_expected) { \
+ gtest_msg.value = \
+ "Expected: " #statement " throws an exception of type " \
+ #expected_exception ".\n Actual: it throws nothing."; \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \
+ fail(gtest_msg.value)
+
+#define GTEST_TEST_NO_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
+ fail("Expected: " #statement " doesn't throw an exception.\n" \
+ " Actual: it throws.")
+
+#define GTEST_TEST_ANY_THROW_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ bool gtest_caught_any = false; \
+ try { \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ } \
+ catch (...) { \
+ gtest_caught_any = true; \
+ } \
+ if (!gtest_caught_any) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
+ fail("Expected: " #statement " throws an exception.\n" \
+ " Actual: it doesn't.")
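+
+// Orientation sketch: the public throw-assertion macros defined in gtest.h
+// are thin wrappers that supply the `fail` parameter, roughly:
+//
+//   #define EXPECT_THROW(statement, expected_exception) \
+//     GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_)
+//   #define ASSERT_THROW(statement, expected_exception) \
+//     GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_)
+//
+// The if/else shape and the goto label keep each expansion a single
+// statement, so the macros stay safe inside unbraced if/else bodies.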
+
+
+// Implements Boolean test assertions such as EXPECT_TRUE. expression can be
+// either a boolean expression or an AssertionResult. text is a textual
+// representation of expression as it was passed into the EXPECT_TRUE.
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (const ::testing::AssertionResult gtest_ar_ = \
+ ::testing::AssertionResult(expression)) \
+ ; \
+ else \
+ fail(::testing::internal::GetBoolAssertionFailureMessage(\
+ gtest_ar_, text, #actual, #expected).c_str())
+
+#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::AlwaysTrue()) { \
+ ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
+ GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+ if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
+ goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
+ } \
+ } else \
+ GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
+ fail("Expected: " #statement " doesn't generate new fatal " \
+ "failures in the current thread.\n" \
+ " Actual: it does.")
+
+// Expands to the name of the class that implements the given test.
+#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+ test_case_name##_##test_name##_Test
+
+// Helper macro for defining tests.
+#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
+class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
+ public:\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
+ private:\
+ virtual void TestBody();\
+ static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
+};\
+\
+::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
+ ::test_info_ =\
+ ::testing::internal::MakeAndRegisterTestInfo(\
+ #test_case_name, #test_name, NULL, NULL, \
+ ::testing::internal::CodeLocation(__FILE__, __LINE__), \
+ (parent_id), \
+ parent_class::SetUpTestCase, \
+ parent_class::TearDownTestCase, \
+ new ::testing::internal::TestFactoryImpl<\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
+void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
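+
+// Orientation sketch: the public TEST() macro in gtest.h builds directly on
+// GTEST_TEST_, roughly:
+//
+//   #define TEST(test_case_name, test_name)                     \
+//     GTEST_TEST_(test_case_name, test_name, ::testing::Test,   \
+//                 ::testing::internal::GetTestTypeId())
+//
+// so TEST(FooTest, Bar) { ... } defines class FooTest_Bar_Test, registers it
+// at static-initialization time via MakeAndRegisterTestInfo(), and leaves a
+// dangling TestBody() signature for the user's braced test body.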
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-linked_ptr.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-linked_ptr.h
new file mode 100644
index 000000000..360294221
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-linked_ptr.h
@@ -0,0 +1,243 @@
+// Copyright 2003 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Dan Egnor (egnor@google.com)
+//
+// A "smart" pointer type with reference tracking. Every pointer to a
+// particular object is kept on a circular linked list. When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (twice the size of a raw
+//   pointer).
+// - Every time a pointer is assigned, the entire list of pointers to that
+// object is traversed. This class is therefore NOT SUITABLE when there
+// will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+// will happen (double deletion).
+//
+// A good use of this class is storing object references in STL containers.
+// You can safely put linked_ptr<> in a vector<>.
+// Other uses may not be as good.
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Bill Gibbons suggested we use something like this.
+//
+// Thread Safety:
+// Unlike other linked_ptr implementations, in this implementation
+// a linked_ptr object is thread-safe in the sense that:
+// - it's safe to copy linked_ptr objects concurrently,
+// - it's safe to copy *from* a linked_ptr and read its underlying
+// raw pointer (e.g. via get()) concurrently, and
+// - it's safe to write to two linked_ptrs that point to the same
+// shared object concurrently.
+// TODO(wan@google.com): rename this to safe_linked_ptr to avoid
+// confusion with normal linked_ptr.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+
+#include <stdlib.h>
+#include <assert.h>
+
+#include "gtest/internal/gtest-port.h"
+
+namespace testing {
+namespace internal {
+
+// Protects copying of all linked_ptr objects.
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// This is used internally by all instances of linked_ptr<>. It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// It must be possible for different types of linked_ptr to participate in
+// the same circular linked list, hence the single class type here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+ // Create a new circle that includes only this instance.
+ void join_new() {
+ next_ = this;
+ }
+
+ // Many linked_ptr operations may change p.link_ for some linked_ptr
+ // variable p in the same circle as this object. Therefore we need
+ // to prevent two such operations from occurring concurrently.
+ //
+ // Note that different types of linked_ptr objects can coexist in a
+ // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and
+ // linked_ptr<Derived2>). Therefore we must use a single mutex to
+ // protect all linked_ptr objects. This can create serious
+ // contention in production code, but is acceptable in a testing
+ // framework.
+
+ // Join an existing circle.
+ void join(linked_ptr_internal const* ptr)
+ GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+ MutexLock lock(&g_linked_ptr_mutex);
+
+ linked_ptr_internal const* p = ptr;
+ while (p->next_ != ptr) {
+ assert(p->next_ != this &&
+ "Trying to join() a linked ring we are already in. "
+ "Is GMock thread safety enabled?");
+ p = p->next_;
+ }
+ p->next_ = this;
+ next_ = ptr;
+ }
+
+ // Leave whatever circle we're part of. Returns true if we were the
+ // last member of the circle. Once this is done, you can join() another.
+ bool depart()
+ GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+ MutexLock lock(&g_linked_ptr_mutex);
+
+ if (next_ == this) return true;
+ linked_ptr_internal const* p = next_;
+ while (p->next_ != this) {
+ assert(p->next_ != next_ &&
+ "Trying to depart() a linked ring we are not in. "
+ "Is GMock thread safety enabled?");
+ p = p->next_;
+ }
+ p->next_ = next_;
+ return false;
+ }
+
+ private:
+ mutable linked_ptr_internal const* next_;
+};
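+
+// How the ring works (explanatory note): every linked_ptr to a given object
+// is a node in one circular list threaded through next_. join() walks the
+// ring and splices this node in; depart() unsplices it and reports whether
+// it was the last node, in which case the caller deletes the pointee.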
+
+template <typename T>
+class linked_ptr {
+ public:
+ typedef T element_type;
+
+ // Take over ownership of a raw pointer. This should happen as soon as
+ // possible after the object is created.
+ explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+ ~linked_ptr() { depart(); }
+
+ // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+ template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+ linked_ptr(linked_ptr const& ptr) { // NOLINT
+ assert(&ptr != this);
+ copy(&ptr);
+ }
+
+ // Assignment releases the old value and acquires the new.
+ template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+ depart();
+ copy(&ptr);
+ return *this;
+ }
+
+ linked_ptr& operator=(linked_ptr const& ptr) {
+ if (&ptr != this) {
+ depart();
+ copy(&ptr);
+ }
+ return *this;
+ }
+
+ // Smart pointer members.
+ void reset(T* ptr = NULL) {
+ depart();
+ capture(ptr);
+ }
+ T* get() const { return value_; }
+ T* operator->() const { return value_; }
+ T& operator*() const { return *value_; }
+
+ bool operator==(T* p) const { return value_ == p; }
+ bool operator!=(T* p) const { return value_ != p; }
+ template <typename U>
+ bool operator==(linked_ptr<U> const& ptr) const {
+ return value_ == ptr.get();
+ }
+ template <typename U>
+ bool operator!=(linked_ptr<U> const& ptr) const {
+ return value_ != ptr.get();
+ }
+
+ private:
+ template <typename U>
+ friend class linked_ptr;
+
+ T* value_;
+ linked_ptr_internal link_;
+
+ void depart() {
+ if (link_.depart()) delete value_;
+ }
+
+ void capture(T* ptr) {
+ value_ = ptr;
+ link_.join_new();
+ }
+
+ template <typename U> void copy(linked_ptr<U> const* ptr) {
+ value_ = ptr->get();
+ if (value_)
+ link_.join(&ptr->link_);
+ else
+ link_.join_new();
+ }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+ return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+ return ptr != x.get();
+}
+
+// A function to convert T* into linked_ptr<T>.
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg)).
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+ return linked_ptr<T>(ptr);
+}
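+
+// Illustrative usage sketch (not part of the original header):
+//
+//   linked_ptr<int> a = make_linked_ptr(new int(42));
+//   linked_ptr<int> b = a;  // a and b now share one reference ring
+//   // *b == 42; the int is deleted when the last of a/b leaves the ring,
+//   // e.g. at scope exit or on reset().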
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util-generated.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util-generated.h
new file mode 100644
index 000000000..4d1d81d20
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util-generated.h
@@ -0,0 +1,5146 @@
+// This file was GENERATED by command:
+// pump.py gtest-param-util-generated.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently Google Test supports at most 50 arguments in Values,
+// and at most 10 arguments in Combine. Please contact
+// googletestframework@googlegroups.com if you need more.
+// Please note that the number of arguments to Combine is limited
+// by the maximum arity of the implementation of tuple which is
+// currently set at 10.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+#include "gtest/internal/gtest-param-util.h"
+#include "gtest/internal/gtest-port.h"
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end);
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container);
+
+namespace internal {
+
+// Used in the Values() function to provide polymorphic capabilities.
+template <typename T1>
+class ValueArray1 {
+ public:
+ explicit ValueArray1(T1 v1) : v1_(v1) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray1& other);
+
+ const T1 v1_;
+};
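+
+// Orientation sketch: the Values() overloads declared in gtest-param-test.h
+// simply construct the matching ValueArrayN, e.g. Values(1, 5, 9) yields a
+// ValueArray3<int, int, int>. The templated conversion operator above is
+// what lets a single Values() expression feed parameterized suites with
+// different parameter types:
+//
+//   INSTANTIATE_TEST_CASE_P(Small, MyIntTest, ::testing::Values(1, 5, 9));
+//   // The same expression could instead convert to ParamGenerator<double>,
+//   // static_cast-ing each stored value.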
+
+template <typename T1, typename T2>
+class ValueArray2 {
+ public:
+ ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray2& other);
+
+ const T1 v1_;
+ const T2 v2_;
+};
+
+template <typename T1, typename T2, typename T3>
+class ValueArray3 {
+ public:
+ ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray3& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+class ValueArray4 {
+ public:
+ ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray4& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class ValueArray5 {
+ public:
+ ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray5& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class ValueArray6 {
+ public:
+ ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray6& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class ValueArray7 {
+ public:
+ ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray7& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class ValueArray8 {
+ public:
+ ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+ T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray8& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class ValueArray9 {
+ public:
+ ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+ T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray9& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class ValueArray10 {
+ public:
+ ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray10& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+class ValueArray11 {
+ public:
+ ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray11& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+class ValueArray12 {
+ public:
+ ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray12& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+class ValueArray13 {
+ public:
+ ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray13& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+class ValueArray14 {
+ public:
+ ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray14& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+class ValueArray15 {
+ public:
+ ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray15& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+class ValueArray16 {
+ public:
+ ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray16& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+class ValueArray17 {
+ public:
+ ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+ T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray17& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+class ValueArray18 {
+ public:
+ ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray18& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+class ValueArray19 {
+ public:
+ ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray19& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+class ValueArray20 {
+ public:
+ ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray20& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+class ValueArray21 {
+ public:
+ ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray21& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+class ValueArray22 {
+ public:
+ ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray22& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+class ValueArray23 {
+ public:
+ ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray23& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+class ValueArray24 {
+ public:
+ ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray24& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+class ValueArray25 {
+ public:
+ ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+ T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray25& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+class ValueArray26 {
+ public:
+ ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray26& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+class ValueArray27 {
+ public:
+ ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray27& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+class ValueArray28 {
+ public:
+ ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray28& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+class ValueArray29 {
+ public:
+ ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray29& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+class ValueArray30 {
+ public:
+ ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray30& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+class ValueArray31 {
+ public:
+ ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray31& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+class ValueArray32 {
+ public:
+ ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray32& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+class ValueArray33 {
+ public:
+ ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32,
+ T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray33& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+class ValueArray34 {
+ public:
+ ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray34& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+class ValueArray35 {
+ public:
+ ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+ v32_(v32), v33_(v33), v34_(v34), v35_(v35) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray35& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+class ValueArray36 {
+ public:
+ ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+ v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray36& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+class ValueArray37 {
+ public:
+ ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+ v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+ v36_(v36), v37_(v37) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray37& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+class ValueArray38 {
+ public:
+ ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray38& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+class ValueArray39 {
+ public:
+ ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray39& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+class ValueArray40 {
+ public:
+ ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+ v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+ v40_(v40) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray40& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+class ValueArray41 {
+ public:
+ ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40,
+ T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray41& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+class ValueArray42 {
+ public:
+ ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray42& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+class ValueArray43 {
+ public:
+ ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+ v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+ v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+ v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+ v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31),
+ v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37),
+ v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray43& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+};
+
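+// The private, never-defined operator=(const ValueArrayN&) declared in each
+// class above is the classic pre-C++11 idiom for disabling assignment (the
+// members are const anyway): outside callers fail to compile, and any
+// in-class use would fail to link. A minimal sketch of the same pattern:
+//
+//   class NonAssignable {
+//    private:
+//     void operator=(const NonAssignable&);  // declared, never defined
+//   };
+//
+// In C++11 and later the equivalent is `void operator=(...) = delete;`.
+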
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+class ValueArray44 {
+ public:
+ ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+ v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+ v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+ v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+ v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30),
+ v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36),
+ v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42),
+ v43_(v43), v44_(v44) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray44& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+class ValueArray45 {
+ public:
+ ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+ v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+ v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+ v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+ v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29),
+ v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35),
+ v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41),
+ v42_(v42), v43_(v43), v44_(v44), v45_(v45) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray45& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+class ValueArray46 {
+ public:
+ ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3),
+ v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+ v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray46& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+class ValueArray47 {
+ public:
+ ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2),
+ v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+ v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+ v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22),
+ v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28),
+ v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34),
+ v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40),
+ v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46),
+ v47_(v47) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray47& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+class ValueArray48 {
+ public:
+ ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1),
+ v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+ v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+ v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+ v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27),
+ v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33),
+ v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39),
+ v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45),
+ v46_(v46), v47_(v47), v48_(v48) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+ static_cast<T>(v48_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray48& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+class ValueArray49 {
+ public:
+ ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48,
+ T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+ v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+ static_cast<T>(v48_), static_cast<T>(v49_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray49& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+ const T49 v49_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+class ValueArray50 {
+ public:
+ ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+ T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+ T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+ T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33,
+ T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41,
+ T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49,
+ T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+ v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+ v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+ v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26),
+ v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32),
+ v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38),
+ v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44),
+ v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+ static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+ static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+ static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+ static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+ static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+ static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+ static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+ static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+ static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+ static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+ static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+ static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+ static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+ static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+ static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+ static_cast<T>(v48_), static_cast<T>(v49_), static_cast<T>(v50_)};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray50& other);
+
+ const T1 v1_;
+ const T2 v2_;
+ const T3 v3_;
+ const T4 v4_;
+ const T5 v5_;
+ const T6 v6_;
+ const T7 v7_;
+ const T8 v8_;
+ const T9 v9_;
+ const T10 v10_;
+ const T11 v11_;
+ const T12 v12_;
+ const T13 v13_;
+ const T14 v14_;
+ const T15 v15_;
+ const T16 v16_;
+ const T17 v17_;
+ const T18 v18_;
+ const T19 v19_;
+ const T20 v20_;
+ const T21 v21_;
+ const T22 v22_;
+ const T23 v23_;
+ const T24 v24_;
+ const T25 v25_;
+ const T26 v26_;
+ const T27 v27_;
+ const T28 v28_;
+ const T29 v29_;
+ const T30 v30_;
+ const T31 v31_;
+ const T32 v32_;
+ const T33 v33_;
+ const T34 v34_;
+ const T35 v35_;
+ const T36 v36_;
+ const T37 v37_;
+ const T38 v38_;
+ const T39 v39_;
+ const T40 v40_;
+ const T41 v41_;
+ const T42 v42_;
+ const T43 v43_;
+ const T44 v44_;
+ const T45 v45_;
+ const T46 v46_;
+ const T47 v47_;
+ const T48 v48_;
+ const T49 v49_;
+ const T50 v50_;
+};
+
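+// Each ValueArrayN above backs the N-argument overload of
+// ::testing::Values(). Arguments are stored with their original types and
+// only converted, via static_cast<T>, when the array is implicitly converted
+// into a ParamGenerator<T> for the concrete parameter type T of a test. A
+// minimal usage sketch (MyTest is a hypothetical TEST_P fixture whose
+// parameter type is int):
+//
+//   INSTANTIATE_TEST_CASE_P(Small, MyTest,
+//                           ::testing::Values(1, 2L, '\3'));
+//
+// Values(1, 2L, '\3') builds a ValueArray3<int, long, char>; the conversion
+// operator produces a ParamGenerator<int> only when the instantiation binds
+// the array to MyTest's parameter type, casting each stored value to int.
+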
+# if GTEST_HAS_COMBINE
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Generates values from the Cartesian product of values produced
+// by the argument generators.
+//
+template <typename T1, typename T2>
+class CartesianProductGenerator2
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2> > {
+ public:
+ typedef ::testing::tuple<T1, T2> ParamType;
+
+ CartesianProductGenerator2(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2)
+ : g1_(g1), g2_(g2) {}
+ virtual ~CartesianProductGenerator2() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an iterator that is past the end of
+    // the range, so no component iterator may be past the end of its range
+    // either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current2_;
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator2::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator2& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+}; // class CartesianProductGenerator2
+
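+// The Iterator above advances like an odometer: the last component moves
+// fastest and, on reaching its end, wraps back to begin() and carries into
+// the component before it. A minimal sketch of the resulting order, assuming
+// GTEST_HAS_COMBINE and a hypothetical TEST_P fixture CombineTest whose
+// parameter type is ::testing::tuple<int, char>:
+//
+//   INSTANTIATE_TEST_CASE_P(
+//       Grid, CombineTest,
+//       ::testing::Combine(::testing::Values(1, 2),
+//                          ::testing::Values('a', 'b')));
+//
+// This yields the parameters (1, 'a'), (1, 'b'), (2, 'a'), (2, 'b'), in that
+// order.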
+
+template <typename T1, typename T2, typename T3>
+class CartesianProductGenerator3
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3> > {
+ public:
+ typedef ::testing::tuple<T1, T2, T3> ParamType;
+
+ CartesianProductGenerator3(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3)
+ : g1_(g1), g2_(g2), g3_(g3) {}
+ virtual ~CartesianProductGenerator3() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an iterator that is past the end of
+    // the range, so no component iterator may be past the end of its range
+    // either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current3_;
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator3::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator3& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+}; // class CartesianProductGenerator3
+
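+// A note on the end-of-range handling shared by all of these generators:
+// AtEnd() is true as soon as any single component iterator reaches its end,
+// and Equals() deliberately treats all past-the-end iterators as equal even
+// when their component iterators differ. A consequence is that a Cartesian
+// product with an empty component range is itself empty: Begin() starts with
+// every component at begin(), so an empty component makes AtEnd() true
+// immediately and Begin() compares equal to End().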
+
+template <typename T1, typename T2, typename T3, typename T4>
+class CartesianProductGenerator4
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4> > {
+ public:
+ typedef ::testing::tuple<T1, T2, T3, T4> ParamType;
+
+ CartesianProductGenerator4(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+ virtual ~CartesianProductGenerator4() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an iterator that is past the end of
+    // the range, so no component iterator may be past the end of its range
+    // either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current4_;
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator4::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator4& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+}; // class CartesianProductGenerator4
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class CartesianProductGenerator5
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5> > {
+ public:
+ typedef ::testing::tuple<T1, T2, T3, T4, T5> ParamType;
+
+ CartesianProductGenerator5(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+ virtual ~CartesianProductGenerator5() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an iterator that is past the end of
+    // the range, so no component iterator may be past the end of its range
+    // either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current5_;
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator5::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator5& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+}; // class CartesianProductGenerator5
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+class CartesianProductGenerator6
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5,
+ T6> > {
+ public:
+ typedef ::testing::tuple<T1, T2, T3, T4, T5, T6> ParamType;
+
+ CartesianProductGenerator6(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+ virtual ~CartesianProductGenerator6() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an iterator that is past the end of
+    // the range, so no component iterator may be past the end of its range
+    // either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current6_;
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator6::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator6& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+}; // class CartesianProductGenerator6
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+class CartesianProductGenerator7
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+ T7> > {
+ public:
+ typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7> ParamType;
+
+ CartesianProductGenerator7(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+ virtual ~CartesianProductGenerator7() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an iterator that is past the end of
+    // the range, so no component iterator may be past the end of its range
+    // either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current7_;
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator7::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator7& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+}; // class CartesianProductGenerator7
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+class CartesianProductGenerator8
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8> > {
+ public:
+ typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8> ParamType;
+
+ CartesianProductGenerator8(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+ g8_(g8) {}
+ virtual ~CartesianProductGenerator8() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance() must not be called on an iterator that is past the end of
+    // the range, so no component iterator may be past the end of its range
+    // either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current8_;
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator8::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator8& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+}; // class CartesianProductGenerator8
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+class CartesianProductGenerator9
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8, T9> > {
+ public:
+ typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9> ParamType;
+
+ CartesianProductGenerator9(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9) {}
+ virtual ~CartesianProductGenerator9() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end(), g9_, g9_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8,
+ const ParamGenerator<T9>& g9,
+ const typename ParamGenerator<T9>::iterator& current9)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+ begin9_(g9.begin()), end9_(g9.end()), current9_(current9) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance should not be called on beyond-the-end iterators,
+    // so no component iterator may be beyond the end of its range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current9_;
+ if (current9_ == end9_) {
+ current9_ = begin9_;
+ ++current8_;
+ }
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_ &&
+ current9_ == typed_other->current9_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_),
+ begin9_(other.begin9_),
+ end9_(other.end9_),
+ current9_(other.current9_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_,
+ *current9_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_ ||
+ current9_ == end9_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ const typename ParamGenerator<T9>::iterator begin9_;
+ const typename ParamGenerator<T9>::iterator end9_;
+ typename ParamGenerator<T9>::iterator current9_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator9::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator9& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+ const ParamGenerator<T9> g9_;
+}; // class CartesianProductGenerator9
+
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+class CartesianProductGenerator10
+ : public ParamGeneratorInterface< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+ T7, T8, T9, T10> > {
+ public:
+ typedef ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> ParamType;
+
+ CartesianProductGenerator10(const ParamGenerator<T1>& g1,
+ const ParamGenerator<T2>& g2, const ParamGenerator<T3>& g3,
+ const ParamGenerator<T4>& g4, const ParamGenerator<T5>& g5,
+ const ParamGenerator<T6>& g6, const ParamGenerator<T7>& g7,
+ const ParamGenerator<T8>& g8, const ParamGenerator<T9>& g9,
+ const ParamGenerator<T10>& g10)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9), g10_(g10) {}
+ virtual ~CartesianProductGenerator10() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_,
+ g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_,
+ g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin());
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(),
+ g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_,
+ g8_.end(), g9_, g9_.end(), g10_, g10_.end());
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base,
+ const ParamGenerator<T1>& g1,
+ const typename ParamGenerator<T1>::iterator& current1,
+ const ParamGenerator<T2>& g2,
+ const typename ParamGenerator<T2>::iterator& current2,
+ const ParamGenerator<T3>& g3,
+ const typename ParamGenerator<T3>::iterator& current3,
+ const ParamGenerator<T4>& g4,
+ const typename ParamGenerator<T4>::iterator& current4,
+ const ParamGenerator<T5>& g5,
+ const typename ParamGenerator<T5>::iterator& current5,
+ const ParamGenerator<T6>& g6,
+ const typename ParamGenerator<T6>::iterator& current6,
+ const ParamGenerator<T7>& g7,
+ const typename ParamGenerator<T7>::iterator& current7,
+ const ParamGenerator<T8>& g8,
+ const typename ParamGenerator<T8>::iterator& current8,
+ const ParamGenerator<T9>& g9,
+ const typename ParamGenerator<T9>::iterator& current9,
+ const ParamGenerator<T10>& g10,
+ const typename ParamGenerator<T10>::iterator& current10)
+ : base_(base),
+ begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+ begin2_(g2.begin()), end2_(g2.end()), current2_(current2),
+ begin3_(g3.begin()), end3_(g3.end()), current3_(current3),
+ begin4_(g4.begin()), end4_(g4.end()), current4_(current4),
+ begin5_(g5.begin()), end5_(g5.end()), current5_(current5),
+ begin6_(g6.begin()), end6_(g6.end()), current6_(current6),
+ begin7_(g7.begin()), end7_(g7.end()), current7_(current7),
+ begin8_(g8.begin()), end8_(g8.end()), current8_(current8),
+ begin9_(g9.begin()), end9_(g9.end()), current9_(current9),
+ begin10_(g10.begin()), end10_(g10.end()), current10_(current10) {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance should not be called on beyond-the-end iterators,
+    // so no component iterator may be beyond the end of its range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current10_;
+ if (current10_ == end10_) {
+ current10_ = begin10_;
+ ++current9_;
+ }
+ if (current9_ == end9_) {
+ current9_ = begin9_;
+ ++current8_;
+ }
+ if (current8_ == end8_) {
+ current8_ = begin8_;
+ ++current7_;
+ }
+ if (current7_ == end7_) {
+ current7_ = begin7_;
+ ++current6_;
+ }
+ if (current6_ == end6_) {
+ current6_ = begin6_;
+ ++current5_;
+ }
+ if (current5_ == end5_) {
+ current5_ = begin5_;
+ ++current4_;
+ }
+ if (current4_ == end4_) {
+ current4_ = begin4_;
+ ++current3_;
+ }
+ if (current3_ == end3_) {
+ current3_ = begin3_;
+ ++current2_;
+ }
+ if (current2_ == end2_) {
+ current2_ = begin2_;
+ ++current1_;
+ }
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ (
+ current1_ == typed_other->current1_ &&
+ current2_ == typed_other->current2_ &&
+ current3_ == typed_other->current3_ &&
+ current4_ == typed_other->current4_ &&
+ current5_ == typed_other->current5_ &&
+ current6_ == typed_other->current6_ &&
+ current7_ == typed_other->current7_ &&
+ current8_ == typed_other->current8_ &&
+ current9_ == typed_other->current9_ &&
+ current10_ == typed_other->current10_);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_),
+ begin1_(other.begin1_),
+ end1_(other.end1_),
+ current1_(other.current1_),
+ begin2_(other.begin2_),
+ end2_(other.end2_),
+ current2_(other.current2_),
+ begin3_(other.begin3_),
+ end3_(other.end3_),
+ current3_(other.current3_),
+ begin4_(other.begin4_),
+ end4_(other.end4_),
+ current4_(other.current4_),
+ begin5_(other.begin5_),
+ end5_(other.end5_),
+ current5_(other.current5_),
+ begin6_(other.begin6_),
+ end6_(other.end6_),
+ current6_(other.current6_),
+ begin7_(other.begin7_),
+ end7_(other.end7_),
+ current7_(other.current7_),
+ begin8_(other.begin8_),
+ end8_(other.end8_),
+ current8_(other.current8_),
+ begin9_(other.begin9_),
+ end9_(other.end9_),
+ current9_(other.current9_),
+ begin10_(other.begin10_),
+ end10_(other.end10_),
+ current10_(other.current10_) {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType(*current1_, *current2_, *current3_,
+ *current4_, *current5_, *current6_, *current7_, *current8_,
+ *current9_, *current10_);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+ current1_ == end1_ ||
+ current2_ == end2_ ||
+ current3_ == end3_ ||
+ current4_ == end4_ ||
+ current5_ == end5_ ||
+ current6_ == end6_ ||
+ current7_ == end7_ ||
+ current8_ == end8_ ||
+ current9_ == end9_ ||
+ current10_ == end10_;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+ const typename ParamGenerator<T1>::iterator begin1_;
+ const typename ParamGenerator<T1>::iterator end1_;
+ typename ParamGenerator<T1>::iterator current1_;
+ const typename ParamGenerator<T2>::iterator begin2_;
+ const typename ParamGenerator<T2>::iterator end2_;
+ typename ParamGenerator<T2>::iterator current2_;
+ const typename ParamGenerator<T3>::iterator begin3_;
+ const typename ParamGenerator<T3>::iterator end3_;
+ typename ParamGenerator<T3>::iterator current3_;
+ const typename ParamGenerator<T4>::iterator begin4_;
+ const typename ParamGenerator<T4>::iterator end4_;
+ typename ParamGenerator<T4>::iterator current4_;
+ const typename ParamGenerator<T5>::iterator begin5_;
+ const typename ParamGenerator<T5>::iterator end5_;
+ typename ParamGenerator<T5>::iterator current5_;
+ const typename ParamGenerator<T6>::iterator begin6_;
+ const typename ParamGenerator<T6>::iterator end6_;
+ typename ParamGenerator<T6>::iterator current6_;
+ const typename ParamGenerator<T7>::iterator begin7_;
+ const typename ParamGenerator<T7>::iterator end7_;
+ typename ParamGenerator<T7>::iterator current7_;
+ const typename ParamGenerator<T8>::iterator begin8_;
+ const typename ParamGenerator<T8>::iterator end8_;
+ typename ParamGenerator<T8>::iterator current8_;
+ const typename ParamGenerator<T9>::iterator begin9_;
+ const typename ParamGenerator<T9>::iterator end9_;
+ typename ParamGenerator<T9>::iterator current9_;
+ const typename ParamGenerator<T10>::iterator begin10_;
+ const typename ParamGenerator<T10>::iterator end10_;
+ typename ParamGenerator<T10>::iterator current10_;
+ ParamType current_value_;
+ }; // class CartesianProductGenerator10::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator10& other);
+
+ const ParamGenerator<T1> g1_;
+ const ParamGenerator<T2> g2_;
+ const ParamGenerator<T3> g3_;
+ const ParamGenerator<T4> g4_;
+ const ParamGenerator<T5> g5_;
+ const ParamGenerator<T6> g6_;
+ const ParamGenerator<T7> g7_;
+ const ParamGenerator<T8> g8_;
+ const ParamGenerator<T9> g9_;
+ const ParamGenerator<T10> g10_;
+}; // class CartesianProductGenerator10
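+
+// A minimal usage sketch (not part of this header; it assumes the public
+// Combine()/Values()/Bool() API declared in gtest-param-test.h). User code
+// never names these generator classes directly; Combine() constructs them:
+//
+//   class FooTest : public ::testing::TestWithParam<
+//       ::testing::tuple<int, bool> > {};
+//   TEST_P(FooTest, DoesBar) { /* ::testing::get<0>(GetParam()), ... */ }
+//   INSTANTIATE_TEST_CASE_P(
+//       Inst, FooTest,
+//       ::testing::Combine(::testing::Values(1, 2), ::testing::Bool()));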
+
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Helper classes providing Combine() with polymorphic features. They allow
+// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is
+// convertible to U.
+//
+template <class Generator1, class Generator2>
+class CartesianProductHolder2 {
+ public:
+CartesianProductHolder2(const Generator1& g1, const Generator2& g2)
+ : g1_(g1), g2_(g2) {}
+ template <typename T1, typename T2>
+ operator ParamGenerator< ::testing::tuple<T1, T2> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2> >(
+ new CartesianProductGenerator2<T1, T2>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder2& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+}; // class CartesianProductHolder2
+
+template <class Generator1, class Generator2, class Generator3>
+class CartesianProductHolder3 {
+ public:
+CartesianProductHolder3(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3)
+ : g1_(g1), g2_(g2), g3_(g3) {}
+ template <typename T1, typename T2, typename T3>
+ operator ParamGenerator< ::testing::tuple<T1, T2, T3> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2, T3> >(
+ new CartesianProductGenerator3<T1, T2, T3>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder3& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+}; // class CartesianProductHolder3
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4>
+class CartesianProductHolder4 {
+ public:
+CartesianProductHolder4(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {}
+ template <typename T1, typename T2, typename T3, typename T4>
+ operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2, T3, T4> >(
+ new CartesianProductGenerator4<T1, T2, T3, T4>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder4& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+}; // class CartesianProductHolder4
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5>
+class CartesianProductHolder5 {
+ public:
+CartesianProductHolder5(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5>
+ operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5> >(
+ new CartesianProductGenerator5<T1, T2, T3, T4, T5>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder5& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+}; // class CartesianProductHolder5
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6>
+class CartesianProductHolder6 {
+ public:
+CartesianProductHolder6(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+ operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6> >(
+ new CartesianProductGenerator6<T1, T2, T3, T4, T5, T6>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder6& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+}; // class CartesianProductHolder6
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7>
+class CartesianProductHolder7 {
+ public:
+CartesianProductHolder7(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+ operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6,
+ T7> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7> >(
+ new CartesianProductGenerator7<T1, T2, T3, T4, T5, T6, T7>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder7& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+}; // class CartesianProductHolder7
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8>
+class CartesianProductHolder8 {
+ public:
+CartesianProductHolder8(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7),
+ g8_(g8) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+ operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7,
+ T8> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8> >(
+ new CartesianProductGenerator8<T1, T2, T3, T4, T5, T6, T7, T8>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder8& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+}; // class CartesianProductHolder8
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8, class Generator9>
+class CartesianProductHolder9 {
+ public:
+CartesianProductHolder9(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8,
+ const Generator9& g9)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+ operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8,
+ T9> >(
+ new CartesianProductGenerator9<T1, T2, T3, T4, T5, T6, T7, T8, T9>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_),
+ static_cast<ParamGenerator<T9> >(g9_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder9& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+}; // class CartesianProductHolder9
+
+template <class Generator1, class Generator2, class Generator3,
+ class Generator4, class Generator5, class Generator6, class Generator7,
+ class Generator8, class Generator9, class Generator10>
+class CartesianProductHolder10 {
+ public:
+CartesianProductHolder10(const Generator1& g1, const Generator2& g2,
+ const Generator3& g3, const Generator4& g4, const Generator5& g5,
+ const Generator6& g6, const Generator7& g7, const Generator8& g8,
+ const Generator9& g9, const Generator10& g10)
+ : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8),
+ g9_(g9), g10_(g10) {}
+ template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+ operator ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T10> >() const {
+ return ParamGenerator< ::testing::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T10> >(
+ new CartesianProductGenerator10<T1, T2, T3, T4, T5, T6, T7, T8, T9,
+ T10>(
+ static_cast<ParamGenerator<T1> >(g1_),
+ static_cast<ParamGenerator<T2> >(g2_),
+ static_cast<ParamGenerator<T3> >(g3_),
+ static_cast<ParamGenerator<T4> >(g4_),
+ static_cast<ParamGenerator<T5> >(g5_),
+ static_cast<ParamGenerator<T6> >(g6_),
+ static_cast<ParamGenerator<T7> >(g7_),
+ static_cast<ParamGenerator<T8> >(g8_),
+ static_cast<ParamGenerator<T9> >(g9_),
+ static_cast<ParamGenerator<T10> >(g10_)));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder10& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+ const Generator10 g10_;
+}; // class CartesianProductHolder10
+
+# endif // GTEST_HAS_COMBINE
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util-generated.h.pump b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util-generated.h.pump
new file mode 100644
index 000000000..5c7c47af0
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util-generated.h.pump
@@ -0,0 +1,286 @@
+$$ -*- mode: c++; -*-
+$var n = 50 $$ Maximum length of Values arguments we want to support.
+$var maxtuple = 10 $$ Maximum number of Combine arguments we want to support.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently Google Test supports at most $n arguments in Values,
+// and at most $maxtuple arguments in Combine. Please contact
+// googletestframework@googlegroups.com if you need more.
+// Please note that the number of arguments to Combine is limited
+// by the maximum arity of the implementation of tuple which is
+// currently set at $maxtuple.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+#include "gtest/internal/gtest-param-util.h"
+#include "gtest/internal/gtest-port.h"
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end);
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container);
+
+namespace internal {
+
+// Used in the Values() function to provide polymorphic capabilities.
+$range i 1..n
+$for i [[
+$range j 1..i
+
+template <$for j, [[typename T$j]]>
+class ValueArray$i {
+ public:
+ $if i==1 [[explicit ]]ValueArray$i($for j, [[T$j v$j]]) : $for j, [[v$(j)_(v$j)]] {}
+
+ template <typename T>
+ operator ParamGenerator<T>() const {
+ const T array[] = {$for j, [[static_cast<T>(v$(j)_)]]};
+ return ValuesIn(array);
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const ValueArray$i& other);
+
+$for j [[
+
+ const T$j v$(j)_;
+]]
+
+};
+
+]]
+
+# if GTEST_HAS_COMBINE
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Generates values from the Cartesian product of values produced
+// by the argument generators.
+//
+$range i 2..maxtuple
+$for i [[
+$range j 1..i
+$range k 2..i
+
+template <$for j, [[typename T$j]]>
+class CartesianProductGenerator$i
+ : public ParamGeneratorInterface< ::testing::tuple<$for j, [[T$j]]> > {
+ public:
+ typedef ::testing::tuple<$for j, [[T$j]]> ParamType;
+
+ CartesianProductGenerator$i($for j, [[const ParamGenerator<T$j>& g$j]])
+ : $for j, [[g$(j)_(g$j)]] {}
+ virtual ~CartesianProductGenerator$i() {}
+
+ virtual ParamIteratorInterface<ParamType>* Begin() const {
+ return new Iterator(this, $for j, [[g$(j)_, g$(j)_.begin()]]);
+ }
+ virtual ParamIteratorInterface<ParamType>* End() const {
+ return new Iterator(this, $for j, [[g$(j)_, g$(j)_.end()]]);
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<ParamType> {
+ public:
+ Iterator(const ParamGeneratorInterface<ParamType>* base, $for j, [[
+
+ const ParamGenerator<T$j>& g$j,
+ const typename ParamGenerator<T$j>::iterator& current$(j)]])
+ : base_(base),
+$for j, [[
+
+ begin$(j)_(g$j.begin()), end$(j)_(g$j.end()), current$(j)_(current$j)
+]] {
+ ComputeCurrentValue();
+ }
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+ return base_;
+ }
+    // Advance should not be called on beyond-the-end iterators,
+    // so no component iterator may be beyond the end of its range, either.
+ virtual void Advance() {
+ assert(!AtEnd());
+ ++current$(i)_;
+
+$for k [[
+ if (current$(i+2-k)_ == end$(i+2-k)_) {
+ current$(i+2-k)_ = begin$(i+2-k)_;
+ ++current$(i+2-k-1)_;
+ }
+
+]]
+ ComputeCurrentValue();
+ }
+ virtual ParamIteratorInterface<ParamType>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const ParamType* Current() const { return &current_value_; }
+ virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const Iterator* typed_other =
+ CheckedDowncastToActualType<const Iterator>(&other);
+ // We must report iterators equal if they both point beyond their
+ // respective ranges. That can happen in a variety of fashions,
+ // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) ||
+ ($for j && [[
+
+ current$(j)_ == typed_other->current$(j)_
+]]);
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : base_(other.base_), $for j, [[
+
+ begin$(j)_(other.begin$(j)_),
+ end$(j)_(other.end$(j)_),
+ current$(j)_(other.current$(j)_)
+]] {
+ ComputeCurrentValue();
+ }
+
+ void ComputeCurrentValue() {
+ if (!AtEnd())
+ current_value_ = ParamType($for j, [[*current$(j)_]]);
+ }
+ bool AtEnd() const {
+      // We must report the iterator as past the end of the range when any of
+      // the component iterators has reached the end of its range.
+ return
+$for j || [[
+
+ current$(j)_ == end$(j)_
+]];
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<ParamType>* const base_;
+ // begin[i]_ and end[i]_ define the i-th range that Iterator traverses.
+ // current[i]_ is the actual traversing iterator.
+$for j [[
+
+ const typename ParamGenerator<T$j>::iterator begin$(j)_;
+ const typename ParamGenerator<T$j>::iterator end$(j)_;
+ typename ParamGenerator<T$j>::iterator current$(j)_;
+]]
+
+ ParamType current_value_;
+ }; // class CartesianProductGenerator$i::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductGenerator$i& other);
+
+
+$for j [[
+ const ParamGenerator<T$j> g$(j)_;
+
+]]
+}; // class CartesianProductGenerator$i
+
+
+]]
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Helper classes providing Combine() with polymorphic features. They allow
+// casting CartesianProductGeneratorN<T> to ParamGenerator<U> if T is
+// convertible to U.
+//
+$range i 2..maxtuple
+$for i [[
+$range j 1..i
+
+template <$for j, [[class Generator$j]]>
+class CartesianProductHolder$i {
+ public:
+CartesianProductHolder$i($for j, [[const Generator$j& g$j]])
+ : $for j, [[g$(j)_(g$j)]] {}
+ template <$for j, [[typename T$j]]>
+ operator ParamGenerator< ::testing::tuple<$for j, [[T$j]]> >() const {
+ return ParamGenerator< ::testing::tuple<$for j, [[T$j]]> >(
+ new CartesianProductGenerator$i<$for j, [[T$j]]>(
+$for j,[[
+
+ static_cast<ParamGenerator<T$j> >(g$(j)_)
+]]));
+ }
+
+ private:
+ // No implementation - assignment is unsupported.
+ void operator=(const CartesianProductHolder$i& other);
+
+
+$for j [[
+ const Generator$j g$(j)_;
+
+]]
+}; // class CartesianProductHolder$i
+
+]]
+
+# endif // GTEST_HAS_COMBINE
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util.h
new file mode 100644
index 000000000..82cab9b02
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-param-util.h
@@ -0,0 +1,731 @@
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <ctype.h>
+
+#include <iterator>
+#include <set>
+#include <utility>
+#include <vector>
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-linked_ptr.h"
+#include "gtest/internal/gtest-port.h"
+#include "gtest/gtest-printers.h"
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Input to a parameterized test name generator, describing a test parameter.
+// Consists of the parameter value and the integer parameter index.
+template <class ParamType>
+struct TestParamInfo {
+ TestParamInfo(const ParamType& a_param, size_t an_index) :
+ param(a_param),
+ index(an_index) {}
+ ParamType param;
+ size_t index;
+};
+
+// A built-in parameterized test name generator which returns the result of
+// testing::PrintToString.
+struct PrintToStringParamName {
+ template <class ParamType>
+ std::string operator()(const TestParamInfo<ParamType>& info) const {
+ return PrintToString(info.param);
+ }
+};
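+
+// A minimal usage sketch (assumes the INSTANTIATE_TEST_CASE_P macro from
+// gtest-param-test.h, which accepts an optional name-generator argument):
+//
+//   INSTANTIATE_TEST_CASE_P(Inst, FooTest, ::testing::Values(1, 2),
+//                           ::testing::PrintToStringParamName());
+//
+// names the tests Inst/FooTest.DoesBar/1 and Inst/FooTest.DoesBar/2
+// instead of the index-based .../0 and .../1.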
+
+namespace internal {
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Outputs a message explaining invalid registration of a different
+// fixture class for the same test case. This may happen when the
+// TEST_P macro is used to define two tests with the same name
+// but in different namespaces.
+GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
+ CodeLocation code_location);
+
+template <typename> class ParamGeneratorInterface;
+template <typename> class ParamGenerator;
+
+// Interface for iterating over elements provided by an implementation
+// of ParamGeneratorInterface<T>.
+template <typename T>
+class ParamIteratorInterface {
+ public:
+ virtual ~ParamIteratorInterface() {}
+ // A pointer to the base generator instance.
+ // Used only for the purposes of iterator comparison
+ // to make sure that two iterators belong to the same generator.
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
+  // Advances the iterator to point to the next element
+ // provided by the generator. The caller is responsible
+ // for not calling Advance() on an iterator equal to
+ // BaseGenerator()->End().
+ virtual void Advance() = 0;
+ // Clones the iterator object. Used for implementing copy semantics
+ // of ParamIterator<T>.
+ virtual ParamIteratorInterface* Clone() const = 0;
+ // Dereferences the current iterator and provides (read-only) access
+ // to the pointed value. It is the caller's responsibility not to call
+ // Current() on an iterator equal to BaseGenerator()->End().
+ // Used for implementing ParamGenerator<T>::operator*().
+ virtual const T* Current() const = 0;
+  // Determines whether this iterator and the given other iterator point to
+  // the same element in the sequence generated by the generator.
+ // Used for implementing ParamGenerator<T>::operator==().
+ virtual bool Equals(const ParamIteratorInterface& other) const = 0;
+};
+
+// Class iterating over elements provided by an implementation of
+// ParamGeneratorInterface<T>. It wraps ParamIteratorInterface<T>
+// and implements the const forward iterator concept.
+template <typename T>
+class ParamIterator {
+ public:
+ typedef T value_type;
+ typedef const T& reference;
+ typedef ptrdiff_t difference_type;
+
+ // ParamIterator assumes ownership of the impl_ pointer.
+ ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
+ ParamIterator& operator=(const ParamIterator& other) {
+ if (this != &other)
+ impl_.reset(other.impl_->Clone());
+ return *this;
+ }
+
+ const T& operator*() const { return *impl_->Current(); }
+ const T* operator->() const { return impl_->Current(); }
+ // Prefix version of operator++.
+ ParamIterator& operator++() {
+ impl_->Advance();
+ return *this;
+ }
+ // Postfix version of operator++.
+ ParamIterator operator++(int /*unused*/) {
+ ParamIteratorInterface<T>* clone = impl_->Clone();
+ impl_->Advance();
+ return ParamIterator(clone);
+ }
+ bool operator==(const ParamIterator& other) const {
+ return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
+ }
+ bool operator!=(const ParamIterator& other) const {
+ return !(*this == other);
+ }
+
+ private:
+ friend class ParamGenerator<T>;
+ explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
+ scoped_ptr<ParamIteratorInterface<T> > impl_;
+};
+
+// ParamGeneratorInterface<T> is the binary interface to access generators
+// defined in other translation units.
+template <typename T>
+class ParamGeneratorInterface {
+ public:
+ typedef T ParamType;
+
+ virtual ~ParamGeneratorInterface() {}
+
+ // Generator interface definition
+ virtual ParamIteratorInterface<T>* Begin() const = 0;
+ virtual ParamIteratorInterface<T>* End() const = 0;
+};
+
+// Wraps ParamGeneratorInterface<T> and provides general generator syntax
+// compatible with the STL Container concept.
+// This class implements copy initialization semantics and the contained
+// ParamGeneratorInterface<T> instance is shared among all copies
+// of the original object. This is possible because that instance is immutable.
+template<typename T>
+class ParamGenerator {
+ public:
+ typedef ParamIterator<T> iterator;
+
+ explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
+ ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
+
+ ParamGenerator& operator=(const ParamGenerator& other) {
+ impl_ = other.impl_;
+ return *this;
+ }
+
+ iterator begin() const { return iterator(impl_->Begin()); }
+ iterator end() const { return iterator(impl_->End()); }
+
+ private:
+ linked_ptr<const ParamGeneratorInterface<T> > impl_;
+};
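+
+// A minimal sketch of consuming a generator through its STL-style interface
+// (internal machinery, shown for illustration only; gen is a hypothetical
+// variable and Range() is declared in gtest-param-test.h):
+//
+//   ParamGenerator<int> gen = Range(0, 3);  // yields 0, 1, 2
+//   for (ParamGenerator<int>::iterator it = gen.begin();
+//        it != gen.end(); ++it) {
+//     const int& value = *it;
+//     ...
+//   }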
+
+// Generates values from a range of two comparable values. Can be used to
+// generate sequences of user-defined types that implement operator+() and
+// operator<().
+// This class is used in the Range() function.
+template <typename T, typename IncrementT>
+class RangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ RangeGenerator(T begin, T end, IncrementT step)
+ : begin_(begin), end_(end),
+ step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
+ virtual ~RangeGenerator() {}
+
+ virtual ParamIteratorInterface<T>* Begin() const {
+ return new Iterator(this, begin_, 0, step_);
+ }
+ virtual ParamIteratorInterface<T>* End() const {
+ return new Iterator(this, end_, end_index_, step_);
+ }
+
+ private:
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
+ IncrementT step)
+ : base_(base), value_(value), index_(index), step_(step) {}
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+ return base_;
+ }
+ virtual void Advance() {
+ value_ = static_cast<T>(value_ + step_);
+ index_++;
+ }
+ virtual ParamIteratorInterface<T>* Clone() const {
+ return new Iterator(*this);
+ }
+ virtual const T* Current() const { return &value_; }
+ virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ const int other_index =
+ CheckedDowncastToActualType<const Iterator>(&other)->index_;
+ return index_ == other_index;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ : ParamIteratorInterface<T>(),
+ base_(other.base_), value_(other.value_), index_(other.index_),
+ step_(other.step_) {}
+
+ // No implementation - assignment is unsupported.
+ void operator=(const Iterator& other);
+
+ const ParamGeneratorInterface<T>* const base_;
+ T value_;
+ int index_;
+ const IncrementT step_;
+ }; // class RangeGenerator::Iterator
+
+ static int CalculateEndIndex(const T& begin,
+ const T& end,
+ const IncrementT& step) {
+ int end_index = 0;
+ for (T i = begin; i < end; i = static_cast<T>(i + step))
+ end_index++;
+ return end_index;
+ }
+
+ // No implementation - assignment is unsupported.
+ void operator=(const RangeGenerator& other);
+
+ const T begin_;
+ const T end_;
+ const IncrementT step_;
+ // The index for the end() iterator. All the elements in the generated
+ // sequence are indexed (0-based) to aid iterator comparison.
+ const int end_index_;
+}; // class RangeGenerator
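+
+// A worked example of the indexing above: Range(0, 10, 3) produces the
+// sequence 0, 3, 6, 9 with end_index_ == 4; the end value itself is never
+// emitted, matching the half-open [begin, end) convention.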
+
+
+// Generates values from a pair of STL-style iterators. Used in the
+// ValuesIn() function. The elements are copied from the source range
+// since the source can be located on the stack, and the generator
+// is likely to persist beyond that stack frame.
+template <typename T>
+class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+ template <typename ForwardIterator>
+ ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
+ : container_(begin, end) {}
+ virtual ~ValuesInIteratorRangeGenerator() {}
+
+ virtual ParamIteratorInterface<T>* Begin() const {
+ return new Iterator(this, container_.begin());
+ }
+ virtual ParamIteratorInterface<T>* End() const {
+ return new Iterator(this, container_.end());
+ }
+
+ private:
+ typedef typename ::std::vector<T> ContainerType;
+
+ class Iterator : public ParamIteratorInterface<T> {
+ public:
+ Iterator(const ParamGeneratorInterface<T>* base,
+ typename ContainerType::const_iterator iterator)
+ : base_(base), iterator_(iterator) {}
+ virtual ~Iterator() {}
+
+ virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+ return base_;
+ }
+ virtual void Advance() {
+ ++iterator_;
+ value_.reset();
+ }
+ virtual ParamIteratorInterface<T>* Clone() const {
+ return new Iterator(*this);
+ }
+    // We need to use the cached value referenced by iterator_ because
+    // *iterator_ can return a temporary object (possibly of a type other
+    // than T), so just having "return &*iterator_;" doesn't work.
+ // value_ is updated here and not in Advance() because Advance()
+ // can advance iterator_ beyond the end of the range, and we cannot
+ // detect that fact. The client code, on the other hand, is
+ // responsible for not calling Current() on an out-of-range iterator.
+ virtual const T* Current() const {
+ if (value_.get() == NULL)
+ value_.reset(new T(*iterator_));
+ return value_.get();
+ }
+ virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+ // Having the same base generator guarantees that the other
+ // iterator is of the same type and we can downcast.
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+ << "The program attempted to compare iterators "
+ << "from different generators." << std::endl;
+ return iterator_ ==
+ CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
+ }
+
+ private:
+ Iterator(const Iterator& other)
+ // The explicit constructor call suppresses a false warning
+ // emitted by gcc when supplied with the -Wextra option.
+ : ParamIteratorInterface<T>(),
+ base_(other.base_),
+ iterator_(other.iterator_) {}
+
+ const ParamGeneratorInterface<T>* const base_;
+ typename ContainerType::const_iterator iterator_;
+ // A cached value of *iterator_. We keep it here to allow access by
+ // pointer in the wrapping iterator's operator->().
+ // value_ needs to be mutable to be accessed in Current().
+    // Use of scoped_ptr helps manage the cached value's lifetime,
+    // which is bound by the lifespan of the iterator itself.
+ mutable scoped_ptr<const T> value_;
+ }; // class ValuesInIteratorRangeGenerator::Iterator
+
+ // No implementation - assignment is unsupported.
+ void operator=(const ValuesInIteratorRangeGenerator& other);
+
+ const ContainerType container_;
+}; // class ValuesInIteratorRangeGenerator
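+
+// A minimal usage sketch (assumes a std::vector<int> named v and the public
+// ValuesIn() overloads from gtest-param-test.h). Because the elements are
+// copied into container_, v may safely go out of scope afterwards:
+//
+//   std::vector<int> v;
+//   v.push_back(3);
+//   v.push_back(5);
+//   INSTANTIATE_TEST_CASE_P(Inst, FooTest, ::testing::ValuesIn(v));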
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Default parameterized test name generator, returns a string containing the
+// integer test parameter index.
+template <class ParamType>
+std::string DefaultParamName(const TestParamInfo<ParamType>& info) {
+ Message name_stream;
+ name_stream << info.index;
+ return name_stream.GetString();
+}
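+
+// For example, with three parameter values the default names are simply
+// "0", "1", and "2", yielding full test names like Inst/FooTest.DoesBar/0.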
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Parameterized test name overload helpers, which help the
+// INSTANTIATE_TEST_CASE_P macro choose between the default parameterized
+// test name generator and user param name generator.
+template <class ParamType, class ParamNameGenFunctor>
+ParamNameGenFunctor GetParamNameGen(ParamNameGenFunctor func) {
+ return func;
+}
+
+template <class ParamType>
+struct ParamNameGenFunc {
+ typedef std::string Type(const TestParamInfo<ParamType>&);
+};
+
+template <class ParamType>
+typename ParamNameGenFunc<ParamType>::Type *GetParamNameGen() {
+ return DefaultParamName;
+}
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Stores a parameter value and later creates tests parameterized with that
+// value.
+template <class TestClass>
+class ParameterizedTestFactory : public TestFactoryBase {
+ public:
+ typedef typename TestClass::ParamType ParamType;
+ explicit ParameterizedTestFactory(ParamType parameter) :
+ parameter_(parameter) {}
+ virtual Test* CreateTest() {
+ TestClass::SetParam(&parameter_);
+ return new TestClass();
+ }
+
+ private:
+ const ParamType parameter_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
+};
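+
+// A sketch of the flow (internal machinery): for each parameter value the
+// test registry calls CreateTest(), which first publishes the value through
+// TestClass::SetParam() so that the freshly constructed test instance can
+// read it back via GetParam().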
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactoryBase is a base class for meta-factories that create
+// test factories for passing into MakeAndRegisterTestInfo function.
+template <class ParamType>
+class TestMetaFactoryBase {
+ public:
+ virtual ~TestMetaFactoryBase() {}
+
+ virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactory creates test factories for passing into the
+// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
+// ownership of the test factory pointer, the same factory object cannot be
+// passed into that method twice. But ParameterizedTestCaseInfo is going to
+// call it for each Test/Parameter value combination. Thus it needs a
+// meta-factory creator class.
+template <class TestCase>
+class TestMetaFactory
+ : public TestMetaFactoryBase<typename TestCase::ParamType> {
+ public:
+ typedef typename TestCase::ParamType ParamType;
+
+ TestMetaFactory() {}
+
+ virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
+ return new ParameterizedTestFactory<TestCase>(parameter);
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfoBase is a generic interface
+// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
+// accumulates test information provided by TEST_P macro invocations
+// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
+// and uses that information to register all resulting test instances
+// in the RegisterTests method. The ParameterizedTestCaseRegistry class holds
+// a collection of pointers to the ParameterizedTestCaseInfo objects
+// and calls RegisterTests() on each of them when asked.
+class ParameterizedTestCaseInfoBase {
+ public:
+ virtual ~ParameterizedTestCaseInfoBase() {}
+
+ // Base part of test case name for display purposes.
+ virtual const string& GetTestCaseName() const = 0;
+ // Test case id to verify identity.
+ virtual TypeId GetTestCaseTypeId() const = 0;
+  // The UnitTest class invokes this method to register tests in this
+  // test case right before running them in the RUN_ALL_TESTS macro.
+  // This method should not be called more than once on any single
+  // instance of a ParameterizedTestCaseInfoBase-derived class.
+ virtual void RegisterTests() = 0;
+
+ protected:
+ ParameterizedTestCaseInfoBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
+// macro invocations for a particular test case and generators
+// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
+// test case. It registers tests with all values generated by all
+// generators when asked.
+template <class TestCase>
+class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
+ public:
+ // ParamType and GeneratorCreationFunc are private types but are required
+ // for declarations of public methods AddTestPattern() and
+ // AddTestCaseInstantiation().
+ typedef typename TestCase::ParamType ParamType;
+  // A function that returns an instance of the appropriate generator type.
+ typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
+ typedef typename ParamNameGenFunc<ParamType>::Type ParamNameGeneratorFunc;
+
+ explicit ParameterizedTestCaseInfo(
+ const char* name, CodeLocation code_location)
+ : test_case_name_(name), code_location_(code_location) {}
+
+ // Test case base name for display purposes.
+ virtual const string& GetTestCaseName() const { return test_case_name_; }
+ // Test case id to verify identity.
+ virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
+  // The TEST_P macro uses AddTestPattern() to record information
+  // about a single test in a local TestInfo structure.
+  // test_case_name is the base name of the test case (without the
+  // invocation prefix). test_base_name is the name of an individual test
+  // without the parameter index. For the test SequenceA/FooTest.DoBar/1,
+  // FooTest is the test case base name and DoBar is the test base name.
+ void AddTestPattern(const char* test_case_name,
+ const char* test_base_name,
+ TestMetaFactoryBase<ParamType>* meta_factory) {
+ tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
+ test_base_name,
+ meta_factory)));
+ }
+ // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information
+ // about a generator.
+ int AddTestCaseInstantiation(const string& instantiation_name,
+ GeneratorCreationFunc* func,
+ ParamNameGeneratorFunc* name_func,
+ const char* file,
+ int line) {
+ instantiations_.push_back(
+ InstantiationInfo(instantiation_name, func, name_func, file, line));
+ return 0; // Return value used only to run this method in namespace scope.
+ }
+  // The UnitTest class invokes this method to register tests in this
+  // test case right before running them in the RUN_ALL_TESTS macro.
+  // This method should not be called more than once on any single
+  // instance of a ParameterizedTestCaseInfoBase-derived class.
+  // UnitTest has a guard to prevent this method from being called more
+  // than once.
+ virtual void RegisterTests() {
+ for (typename TestInfoContainer::iterator test_it = tests_.begin();
+ test_it != tests_.end(); ++test_it) {
+ linked_ptr<TestInfo> test_info = *test_it;
+ for (typename InstantiationContainer::iterator gen_it =
+ instantiations_.begin(); gen_it != instantiations_.end();
+ ++gen_it) {
+ const string& instantiation_name = gen_it->name;
+ ParamGenerator<ParamType> generator((*gen_it->generator)());
+ ParamNameGeneratorFunc* name_func = gen_it->name_func;
+ const char* file = gen_it->file;
+ int line = gen_it->line;
+
+ string test_case_name;
+ if ( !instantiation_name.empty() )
+ test_case_name = instantiation_name + "/";
+ test_case_name += test_info->test_case_base_name;
+
+ size_t i = 0;
+ std::set<std::string> test_param_names;
+ for (typename ParamGenerator<ParamType>::iterator param_it =
+ generator.begin();
+ param_it != generator.end(); ++param_it, ++i) {
+ Message test_name_stream;
+
+ std::string param_name = name_func(
+ TestParamInfo<ParamType>(*param_it, i));
+
+ GTEST_CHECK_(IsValidParamName(param_name))
+ << "Parameterized test name '" << param_name
+ << "' is invalid, in " << file
+ << " line " << line << std::endl;
+
+ GTEST_CHECK_(test_param_names.count(param_name) == 0)
+ << "Duplicate parameterized test name '" << param_name
+ << "', in " << file << " line " << line << std::endl;
+
+ test_param_names.insert(param_name);
+
+ test_name_stream << test_info->test_base_name << "/" << param_name;
+ MakeAndRegisterTestInfo(
+ test_case_name.c_str(),
+ test_name_stream.GetString().c_str(),
+ NULL, // No type parameter.
+ PrintToString(*param_it).c_str(),
+ code_location_,
+ GetTestCaseTypeId(),
+ TestCase::SetUpTestCase,
+ TestCase::TearDownTestCase,
+ test_info->test_meta_factory->CreateTestFactory(*param_it));
+ } // for param_it
+ } // for gen_it
+ } // for test_it
+ } // RegisterTests
+
+ private:
+  // TestInfo structure keeps information about a single test registered
+  // with the TEST_P macro.
+ struct TestInfo {
+ TestInfo(const char* a_test_case_base_name,
+ const char* a_test_base_name,
+ TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+ test_case_base_name(a_test_case_base_name),
+ test_base_name(a_test_base_name),
+ test_meta_factory(a_test_meta_factory) {}
+
+ const string test_case_base_name;
+ const string test_base_name;
+ const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+ };
+ typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
+ // Records data received from INSTANTIATE_TEST_CASE_P macros:
+ // <Instantiation name, Sequence generator creation function,
+ // Name generator function, Source file, Source line>
+ struct InstantiationInfo {
+ InstantiationInfo(const std::string &name_in,
+ GeneratorCreationFunc* generator_in,
+ ParamNameGeneratorFunc* name_func_in,
+ const char* file_in,
+ int line_in)
+ : name(name_in),
+ generator(generator_in),
+ name_func(name_func_in),
+ file(file_in),
+ line(line_in) {}
+
+ std::string name;
+ GeneratorCreationFunc* generator;
+ ParamNameGeneratorFunc* name_func;
+ const char* file;
+ int line;
+ };
+ typedef ::std::vector<InstantiationInfo> InstantiationContainer;
+
+ static bool IsValidParamName(const std::string& name) {
+ // Check for empty string
+ if (name.empty())
+ return false;
+
+ // Check for invalid characters
+ for (std::string::size_type index = 0; index < name.size(); ++index) {
+ if (!isalnum(name[index]) && name[index] != '_')
+ return false;
+ }
+
+ return true;
+ }
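+  // For illustration: IsValidParamName("Foo_1") is true, while
+  // IsValidParamName("") and IsValidParamName("Foo.1") are false
+  // ('.' is neither alphanumeric nor '_').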
+
+ const string test_case_name_;
+ CodeLocation code_location_;
+ TestInfoContainer tests_;
+ InstantiationContainer instantiations_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
+}; // class ParameterizedTestCaseInfo
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseRegistry contains a collection of
+// ParameterizedTestCaseInfoBase objects, looked up by test case name.
+// TEST_P and INSTANTIATE_TEST_CASE_P macros use it to locate their
+// corresponding ParameterizedTestCaseInfo descriptors.
+class ParameterizedTestCaseRegistry {
+ public:
+ ParameterizedTestCaseRegistry() {}
+ ~ParameterizedTestCaseRegistry() {
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ delete *it;
+ }
+ }
+
+ // Looks up or creates and returns a structure containing information about
+ // tests and instantiations of a particular test case.
+ template <class TestCase>
+ ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+ const char* test_case_name,
+ CodeLocation code_location) {
+ ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ if ((*it)->GetTestCaseName() == test_case_name) {
+ if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
+ // Complain about incorrect usage of Google Test facilities
+          // and terminate the program since we cannot guarantee correct
+ // test case setup and tear-down in this case.
+ ReportInvalidTestCaseType(test_case_name, code_location);
+ posix::Abort();
+ } else {
+ // At this point we are sure that the object we found is of the same
+ // type we are looking for, so we downcast it to that type
+ // without further checks.
+ typed_test_info = CheckedDowncastToActualType<
+ ParameterizedTestCaseInfo<TestCase> >(*it);
+ }
+ break;
+ }
+ }
+ if (typed_test_info == NULL) {
+ typed_test_info = new ParameterizedTestCaseInfo<TestCase>(
+ test_case_name, code_location);
+ test_case_infos_.push_back(typed_test_info);
+ }
+ return typed_test_info;
+ }
+ void RegisterTests() {
+ for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+ it != test_case_infos_.end(); ++it) {
+ (*it)->RegisterTests();
+ }
+ }
+
+ private:
+ typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
+
+ TestCaseInfoContainer test_case_infos_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
+};
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-port-arch.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-port-arch.h
new file mode 100644
index 000000000..74ab94905
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-port-arch.h
@@ -0,0 +1,93 @@
+// Copyright 2015, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the GTEST_OS_* macros.
+// It is separate from gtest-port.h so that custom/gtest-port.h can include it.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
+
+// Determines the platform on which Google Test is compiled.
+#ifdef __CYGWIN__
+# define GTEST_OS_CYGWIN 1
+#elif defined __SYMBIAN32__
+# define GTEST_OS_SYMBIAN 1
+#elif defined _WIN32
+# define GTEST_OS_WINDOWS 1
+# ifdef _WIN32_WCE
+# define GTEST_OS_WINDOWS_MOBILE 1
+# elif defined(__MINGW__) || defined(__MINGW32__)
+# define GTEST_OS_WINDOWS_MINGW 1
+# elif defined(WINAPI_FAMILY)
+# include <winapifamily.h>
+# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+# define GTEST_OS_WINDOWS_DESKTOP 1
+# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP)
+# define GTEST_OS_WINDOWS_PHONE 1
+# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+# define GTEST_OS_WINDOWS_RT 1
+# else
+ // WINAPI_FAMILY defined but no known partition matched.
+ // Default to desktop.
+# define GTEST_OS_WINDOWS_DESKTOP 1
+# endif
+# else
+# define GTEST_OS_WINDOWS_DESKTOP 1
+# endif // _WIN32_WCE
+#elif defined __APPLE__
+# define GTEST_OS_MAC 1
+# if TARGET_OS_IPHONE
+# define GTEST_OS_IOS 1
+# endif
+#elif defined __FreeBSD__
+# define GTEST_OS_FREEBSD 1
+#elif defined __linux__
+# define GTEST_OS_LINUX 1
+# if defined __ANDROID__
+# define GTEST_OS_LINUX_ANDROID 1
+# endif
+#elif defined __MVS__
+# define GTEST_OS_ZOS 1
+#elif defined(__sun) && defined(__SVR4)
+# define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+# define GTEST_OS_AIX 1
+#elif defined(__hpux)
+# define GTEST_OS_HPUX 1
+#elif defined __native_client__
+# define GTEST_OS_NACL 1
+#elif defined __OpenBSD__
+# define GTEST_OS_OPENBSD 1
+#elif defined __QNX__
+# define GTEST_OS_QNX 1
+#endif // __CYGWIN__
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_ARCH_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-port.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-port.h
new file mode 100644
index 000000000..da57e65d3
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-port.h
@@ -0,0 +1,2567 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan)
+//
+// Low-level types and utilities for porting Google Test to various
+// platforms. All macros ending with _ and symbols defined in an
+// internal namespace are subject to change without notice. Code
+// outside Google Test MUST NOT USE THEM DIRECTLY. Macros that don't
+// end with _ are part of Google Test's public API and can be used by
+// code outside Google Test.
+//
+// This file is fundamental to Google Test. All other Google Test source
+// files are expected to #include this. Therefore, it cannot #include
+// any other Google Test header.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+// Environment-describing macros
+// -----------------------------
+//
+// Google Test can be used in many different environments. Macros in
+// this section tell Google Test what kind of environment it is being
+// used in, such that Google Test can provide environment-specific
+// features and implementations.
+//
+// Google Test tries to automatically detect the properties of its
+// environment, so users usually don't need to worry about these
+// macros. However, the automatic detection is not perfect.
+// Sometimes it's necessary for a user to define some of the following
+// macros in the build script to override Google Test's decisions.
+//
+// If the user doesn't define a macro in the list, Google Test will
+// provide a default definition. After this header is #included, all
+// macros in this list will be defined to either 1 or 0.
+//
+// Notes to maintainers:
+// - Each macro here is a user-tweakable knob; do not grow the list
+// lightly.
+// - Use #if to key off these macros. Don't use #ifdef or "#if
+// defined(...)", which will not work as these macros are ALWAYS
+// defined.
+//
+// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2)
+// is/isn't available.
+// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions
+// are enabled.
+// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string
+// is/isn't available (some systems define
+// ::string, which is different from std::string).
+// GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::wstring
+// is/isn't available (some systems define
+// ::wstring, which is different from std::wstring).
+// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular
+// expressions are/aren't available.
+// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that <pthread.h>
+// is/isn't available.
+// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't
+// enabled.
+// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that
+// std::wstring does/doesn't work (Google Test can
+// be used where std::wstring is unavailable).
+// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple
+// is/isn't available.
+// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the
+// compiler supports Microsoft's "Structured
+// Exception Handling".
+// GTEST_HAS_STREAM_REDIRECTION
+// - Define it to 1/0 to indicate whether the
+// platform supports I/O stream redirection using
+// dup() and dup2().
+// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google
+// Test's own tr1 tuple implementation should be
+// used. Unused when the user sets
+// GTEST_HAS_TR1_TUPLE to 0.
+// GTEST_LANG_CXX11 - Define it to 1/0 to indicate that Google Test
+// is building in C++11/C++98 mode.
+// GTEST_LINKED_AS_SHARED_LIBRARY
+// - Define to 1 when compiling tests that use
+// Google Test as a shared library (known as
+// DLL on Windows).
+// GTEST_CREATE_SHARED_LIBRARY
+// - Define to 1 when compiling Google Test itself
+// as a shared library.
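+//
+// For example (illustrative), a user's build script might override one of
+// these knobs on the compiler command line:
+//
+//   g++ -DGTEST_HAS_PTHREAD=0 -DGTEST_HAS_RTTI=0 ... my_test.cc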
+
+// Platform-indicating macros
+// --------------------------
+//
+// Macros indicating the platform on which Google Test is being used
+// (a macro is defined to 1 if compiled on the given platform;
+// otherwise UNDEFINED -- it's never defined to 0). Google Test
+// defines these macros automatically. Code outside Google Test MUST
+// NOT define them.
+//
+// GTEST_OS_AIX - IBM AIX
+// GTEST_OS_CYGWIN - Cygwin
+// GTEST_OS_FREEBSD - FreeBSD
+// GTEST_OS_HPUX - HP-UX
+// GTEST_OS_LINUX - Linux
+// GTEST_OS_LINUX_ANDROID - Google Android
+// GTEST_OS_MAC - Mac OS X
+// GTEST_OS_IOS - iOS
+// GTEST_OS_NACL - Google Native Client (NaCl)
+// GTEST_OS_OPENBSD - OpenBSD
+// GTEST_OS_QNX - QNX
+// GTEST_OS_SOLARIS - Sun Solaris
+// GTEST_OS_SYMBIAN - Symbian
+// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile)
+// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop
+// GTEST_OS_WINDOWS_MINGW - MinGW
+// GTEST_OS_WINDOWS_MOBILE - Windows Mobile
+// GTEST_OS_WINDOWS_PHONE - Windows Phone
+// GTEST_OS_WINDOWS_RT - Windows Store App/WinRT
+// GTEST_OS_ZOS - z/OS
+//
+// Among the platforms, Cygwin, Linux, Mac OS X, and Windows have the
+// most stable support. Since core members of the Google Test project
+// don't have access to other platforms, support for them may be less
+// stable. If you notice any problems on your platform, please notify
+// googletestframework@googlegroups.com (patches for fixing them are
+// even more welcome!).
+//
+// It is possible that none of the GTEST_OS_* macros are defined.
+
+// Feature-indicating macros
+// -------------------------
+//
+// Macros indicating which Google Test features are available (a macro
+// is defined to 1 if the corresponding feature is supported;
+// otherwise UNDEFINED -- it's never defined to 0). Google Test
+// defines these macros automatically. Code outside Google Test MUST
+// NOT define them.
+//
+// These macros are public so that portable tests can be written.
+// Such tests typically surround code using a feature with an #if
+// which controls that code. For example:
+//
+// #if GTEST_HAS_DEATH_TEST
+// EXPECT_DEATH(DoSomethingDeadly());
+// #endif
+//
+// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized
+// tests)
+// GTEST_HAS_DEATH_TEST - death tests
+// GTEST_HAS_PARAM_TEST - value-parameterized tests
+// GTEST_HAS_TYPED_TEST - typed tests
+// GTEST_HAS_TYPED_TEST_P - type-parameterized tests
+// GTEST_IS_THREADSAFE - Google Test is thread-safe.
+// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with
+// GTEST_HAS_POSIX_RE (see above) which users can
+// define themselves.
+// GTEST_USES_SIMPLE_RE - our own simple regex is used;
+// the above two are mutually exclusive.
+// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ().
+
+// Misc public macros
+// ------------------
+//
+// GTEST_FLAG(flag_name) - references the variable corresponding to
+// the given Google Test flag.
+
+// Internal utilities
+// ------------------
+//
+// The following macros and utilities are for Google Test's INTERNAL
+// use only. Code outside Google Test MUST NOT USE THEM DIRECTLY.
+//
+// Macros for basic C++ coding:
+// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning.
+// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a
+// variable don't have to be used.
+// GTEST_DISALLOW_ASSIGN_ - disables operator=.
+// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=.
+// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used.
+// GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is
+// suppressed (constant conditional).
+// GTEST_INTENTIONAL_CONST_COND_POP_ - finish code section where MSVC C4127
+// is suppressed.
+//
+// C++11 feature wrappers:
+//
+// testing::internal::move - portability wrapper for std::move.
+//
+// Synchronization:
+// Mutex, MutexLock, ThreadLocal, GetThreadCount()
+// - synchronization primitives.
+//
+// Template meta programming:
+// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only.
+// IteratorTraits - partial implementation of std::iterator_traits, which
+// is not available in libCstd when compiled with Sun C++.
+//
+// Smart pointers:
+// scoped_ptr - as in TR2.
+//
+// Regular expressions:
+// RE - a simple regular expression class using the POSIX
+// Extended Regular Expression syntax on UNIX-like
+//                      platforms, or a reduced regular expression syntax on
+// other platforms, including Windows.
+//
+// Logging:
+// GTEST_LOG_() - logs messages at the specified severity level.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+//
+// Stdout and stderr capturing:
+// CaptureStdout() - starts capturing stdout.
+// GetCapturedStdout() - stops capturing stdout and returns the captured
+// string.
+// CaptureStderr() - starts capturing stderr.
+// GetCapturedStderr() - stops capturing stderr and returns the captured
+// string.
+//
+// Integer types:
+//   TypeWithSize     - maps an integer to an int type of that size.
+// Int32, UInt32, Int64, UInt64, TimeInMillis
+// - integers of known sizes.
+// BiggestInt - the biggest signed integer type.
+//
+// Command-line utilities:
+// GTEST_DECLARE_*() - declares a flag.
+// GTEST_DEFINE_*() - defines a flag.
+// GetInjectableArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+// GetEnv() - gets the value of an environment variable.
+// BoolFromGTestEnv() - parses a bool environment variable.
+// Int32FromGTestEnv() - parses an Int32 environment variable.
+// StringFromGTestEnv() - parses a string environment variable.
+
+#include <ctype.h> // for isspace, etc
+#include <stddef.h> // for ptrdiff_t
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32_WCE
+# include <sys/types.h>
+# include <sys/stat.h>
+#endif // !_WIN32_WCE
+
+#if defined __APPLE__
+# include <AvailabilityMacros.h>
+# include <TargetConditionals.h>
+#endif
+
+#include <algorithm> // NOLINT
+#include <iostream> // NOLINT
+#include <sstream> // NOLINT
+#include <string> // NOLINT
+#include <utility>
+#include <vector> // NOLINT
+
+#include "gtest/internal/gtest-port-arch.h"
+#include "gtest/internal/custom/gtest-port.h"
+
+#if !defined(GTEST_DEV_EMAIL_)
+# define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+# define GTEST_FLAG_PREFIX_ "gtest_"
+# define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+# define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+# define GTEST_NAME_ "Google Test"
+# define GTEST_PROJECT_URL_ "https://github.com/google/googletest/"
+#endif // !defined(GTEST_DEV_EMAIL_)
+
+#if !defined(GTEST_INIT_GOOGLE_TEST_NAME_)
+# define GTEST_INIT_GOOGLE_TEST_NAME_ "testing::InitGoogleTest"
+#endif // !defined(GTEST_INIT_GOOGLE_TEST_NAME_)
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+# define GTEST_GCC_VER_ \
+ (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
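+// For example (illustrative): GCC 4.6.3 yields GTEST_GCC_VER_ == 40603.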
+#endif // __GNUC__
+
+// Macros for disabling Microsoft Visual C++ warnings.
+//
+// GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 4385)
+// /* code that triggers warnings C4800 and C4385 */
+// GTEST_DISABLE_MSC_WARNINGS_POP_()
+#if _MSC_VER >= 1500
+# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \
+ __pragma(warning(push)) \
+ __pragma(warning(disable: warnings))
+# define GTEST_DISABLE_MSC_WARNINGS_POP_() \
+ __pragma(warning(pop))
+#else
+// Older versions of MSVC don't have __pragma.
+# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings)
+# define GTEST_DISABLE_MSC_WARNINGS_POP_()
+#endif
+
+#ifndef GTEST_LANG_CXX11
+// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when
+// -std={c,gnu}++{0x,11} is passed. The C++11 standard specifies a
+// value for __cplusplus, and recent versions of clang, gcc, and
+// probably other compilers set that too in C++11 mode.
+# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L
+// Compiling in at least C++11 mode.
+# define GTEST_LANG_CXX11 1
+# else
+# define GTEST_LANG_CXX11 0
+# endif
+#endif
+
+// Distinct from C++11 language support, some environments don't provide
+// proper C++11 library support. Notably, it's possible to build in
+// C++11 mode when targeting Mac OS X 10.6, which has an old libstdc++
+// with no C++11 support.
+//
+// libstdc++ has sufficient C++11 support as of GCC 4.6.0, __GLIBCXX__
+// 20110325, but maintenance releases in the 4.4 and 4.5 series followed
+// this date, so check for those versions by their date stamps.
+// https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html#abi.versioning
+#if GTEST_LANG_CXX11 && \
+ (!defined(__GLIBCXX__) || ( \
+ __GLIBCXX__ >= 20110325ul && /* GCC >= 4.6.0 */ \
+ /* Blacklist of patch releases of older branches: */ \
+ __GLIBCXX__ != 20110416ul && /* GCC 4.4.6 */ \
+ __GLIBCXX__ != 20120313ul && /* GCC 4.4.7 */ \
+ __GLIBCXX__ != 20110428ul && /* GCC 4.5.3 */ \
+ __GLIBCXX__ != 20120702ul)) /* GCC 4.5.4 */
+# define GTEST_STDLIB_CXX11 1
+#endif
+
+// Only use C++11 library features if the library provides them.
+#if GTEST_STDLIB_CXX11
+# define GTEST_HAS_STD_BEGIN_AND_END_ 1
+# define GTEST_HAS_STD_FORWARD_LIST_ 1
+# define GTEST_HAS_STD_FUNCTION_ 1
+# define GTEST_HAS_STD_INITIALIZER_LIST_ 1
+# define GTEST_HAS_STD_MOVE_ 1
+# define GTEST_HAS_STD_SHARED_PTR_ 1
+# define GTEST_HAS_STD_TYPE_TRAITS_ 1
+# define GTEST_HAS_STD_UNIQUE_PTR_ 1
+#endif
+
+// C++11 specifies that <tuple> provides std::tuple.
+// Some platforms still might not have it, however.
+#if GTEST_LANG_CXX11
+# define GTEST_HAS_STD_TUPLE_ 1
+# if defined(__clang__)
+// Inspired by http://clang.llvm.org/docs/LanguageExtensions.html#__has_include
+# if defined(__has_include) && !__has_include(<tuple>)
+# undef GTEST_HAS_STD_TUPLE_
+# endif
+# elif defined(_MSC_VER)
+// Inspired by boost/config/stdlib/dinkumware.hpp
+# if defined(_CPPLIB_VER) && _CPPLIB_VER < 520
+# undef GTEST_HAS_STD_TUPLE_
+# endif
+# elif defined(__GLIBCXX__)
+// Inspired by boost/config/stdlib/libstdcpp3.hpp,
+// http://gcc.gnu.org/gcc-4.2/changes.html and
+// http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt01ch01.html#manual.intro.status.standard.200x
+# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2)
+# undef GTEST_HAS_STD_TUPLE_
+# endif
+# endif
+#endif
+
+// Brings in definitions for functions used in the testing::internal::posix
+// namespace (read, write, close, chdir, isatty, stat). We do not currently
+// use them on Windows Mobile.
+#if GTEST_OS_WINDOWS
+# if !GTEST_OS_WINDOWS_MOBILE
+# include <direct.h>
+# include <io.h>
+# endif
+// In order to avoid having to include <windows.h>, use a forward declaration
+// assuming CRITICAL_SECTION is a typedef of _RTL_CRITICAL_SECTION.
+// This assumption is verified by
+// WindowsTypesTest.CRITICAL_SECTIONIs_RTL_CRITICAL_SECTION.
+struct _RTL_CRITICAL_SECTION;
+#else
+// This assumes that non-Windows OSes provide unistd.h. For OSes where this
+// is not the case, we need to include headers that provide the functions
+// mentioned above.
+# include <unistd.h>
+# include <strings.h>
+#endif // GTEST_OS_WINDOWS
+
+#if GTEST_OS_LINUX_ANDROID
+// Used to define __ANDROID_API__ matching the target NDK API level.
+# include <android/api-level.h> // NOLINT
+#endif
+
+// Defines this to true iff Google Test can use POSIX regular expressions.
+#ifndef GTEST_HAS_POSIX_RE
+# if GTEST_OS_LINUX_ANDROID
+// On Android, <regex.h> is only available starting with Gingerbread.
+# define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)
+# else
+# define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+# endif
+#endif
+
+#if GTEST_USES_PCRE
+// The appropriate headers have already been included.
+
+#elif GTEST_HAS_POSIX_RE
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise. We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+# include <regex.h> // NOLINT
+
+# define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+// <regex.h> is not available on Windows. Use our own simple regex
+// implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#else
+
+// <regex.h> may not be available on this platform. Use our own
+// simple regex implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#endif // GTEST_USES_PCRE
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+# ifndef _HAS_EXCEPTIONS
+# define _HAS_EXCEPTIONS 1
+# endif // _HAS_EXCEPTIONS
+# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+# elif defined(__clang__)
+// Before clang r220714, clang defines __EXCEPTIONS iff exceptions are
+// enabled; after that revision it is defined iff cleanups are enabled.
+// In Obj-C++ files there can be cleanups for ObjC exceptions even when C++
+// exceptions are disabled, so __EXCEPTIONS alone is not reliable. clang has
+// __has_feature(cxx_exceptions), which checks for C++ exceptions starting at
+// clang r206352 but checked for cleanups prior to that. To reliably detect
+// C++ exception availability with clang, check for both
+// __EXCEPTIONS && __has_feature(cxx_exceptions).
+# define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions))
+# elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions. However, there is no compile-time way of
+// detecting whether they are enabled or not. Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+# define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__HP_aCC)
+// Exception handling is in effect by default in the HP aCC compiler. It
+// has to be turned off with the +noeh compiler option if desired.
+# define GTEST_HAS_EXCEPTIONS 1
+# else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
+# define GTEST_HAS_EXCEPTIONS 0
+# endif // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif // GTEST_HAS_EXCEPTIONS
+
+#if !defined(GTEST_HAS_STD_STRING)
+// Even though we don't use this macro any longer, we keep it in case
+// some clients still depend on it.
+# define GTEST_HAS_STD_STRING 1
+#elif !GTEST_HAS_STD_STRING
+// The user told us that ::std::string isn't available.
+# error "Google Test cannot be used where ::std::string isn't available."
+#endif // !defined(GTEST_HAS_STD_STRING)
+
+#ifndef GTEST_HAS_GLOBAL_STRING
+// The user didn't tell us whether ::string is available, so we need
+// to figure it out.
+
+# define GTEST_HAS_GLOBAL_STRING 0
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+#ifndef GTEST_HAS_STD_WSTRING
+// The user didn't tell us whether ::std::wstring is available, so we need
+// to figure it out.
+// TODO(wan@google.com): use autoconf to detect whether ::std::wstring
+// is available.
+
+// Cygwin 1.7 and below don't support ::std::wstring.
+// Solaris' libc++ doesn't support it either. Android has
+// no support for it, at least as recently as Froyo (2.2).
+# define GTEST_HAS_STD_WSTRING \
+ (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS))
+
+#endif // GTEST_HAS_STD_WSTRING
+
+#ifndef GTEST_HAS_GLOBAL_WSTRING
+// The user didn't tell us whether ::wstring is available, so we need
+// to figure it out.
+# define GTEST_HAS_GLOBAL_WSTRING \
+ (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING)
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// Determines whether RTTI is available.
+#ifndef GTEST_HAS_RTTI
+// The user didn't tell us whether RTTI is enabled, so we need to
+// figure it out.
+
+# ifdef _MSC_VER
+
+# ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled.
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif
+
+// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled.
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
+
+# ifdef __GXX_RTTI
+// When building against STLport with the Android NDK and with
+// -frtti -fno-exceptions, the build fails at link time with undefined
+// references to __cxa_bad_typeid. It is not clear whether this is an STL
+// or a toolchain bug, so disable RTTI when this configuration is detected.
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \
+ !defined(__EXCEPTIONS)
+# define GTEST_HAS_RTTI 0
+# else
+# define GTEST_HAS_RTTI 1
+# endif // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS
+# else
+# define GTEST_HAS_RTTI 0
+# endif // __GXX_RTTI
+
+// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends
+// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the
+// first version with C++ support.
+# elif defined(__clang__)
+
+# define GTEST_HAS_RTTI __has_feature(cxx_rtti)
+
+// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
+// both the typeid and dynamic_cast features are present.
+# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
+
+# ifdef __RTTI_ALL__
+# define GTEST_HAS_RTTI 1
+# else
+# define GTEST_HAS_RTTI 0
+# endif
+
+# else
+
+// For all other compilers, we assume RTTI is enabled.
+# define GTEST_HAS_RTTI 1
+
+# endif // _MSC_VER
+
+#endif // GTEST_HAS_RTTI
+
+// It's this header's responsibility to #include <typeinfo> when RTTI
+// is enabled.
+#if GTEST_HAS_RTTI
+# include <typeinfo>
+#endif
+
+// Determines whether Google Test can use the pthreads library.
+#ifndef GTEST_HAS_PTHREAD
+// The user didn't tell us explicitly, so we make reasonable assumptions about
+// which platforms have pthreads support.
+//
+// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
+// to your compiler flags.
+# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \
+ || GTEST_OS_QNX || GTEST_OS_FREEBSD || GTEST_OS_NACL)
+#endif // GTEST_HAS_PTHREAD
+
+#if GTEST_HAS_PTHREAD
+// gtest-port.h guarantees to #include <pthread.h> when GTEST_HAS_PTHREAD is
+// true.
+# include <pthread.h> // NOLINT
+
+// For timespec and nanosleep, used below.
+# include <time.h> // NOLINT
+#endif
+
+// Determines if hash_map/hash_set are available.
+// Only used for testing against those containers.
+#if !defined(GTEST_HAS_HASH_MAP_)
+# if _MSC_VER
+# define GTEST_HAS_HASH_MAP_ 1 // Indicates that hash_map is available.
+# define GTEST_HAS_HASH_SET_ 1 // Indicates that hash_set is available.
+# endif // _MSC_VER
+#endif // !defined(GTEST_HAS_HASH_MAP_)
+
+// Determines whether Google Test can use tr1/tuple. You can define
+// this macro to 0 to prevent Google Test from using tuple (any
+// feature depending on tuple will be disabled in this mode).
+#ifndef GTEST_HAS_TR1_TUPLE
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR)
+// STLport, provided with the Android NDK, has neither <tr1/tuple> nor <tuple>.
+# define GTEST_HAS_TR1_TUPLE 0
+# else
+// The user didn't tell us not to do it, so we assume it's OK.
+# define GTEST_HAS_TR1_TUPLE 1
+# endif
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether Google Test's own tr1 tuple implementation
+// should be used.
+#ifndef GTEST_USE_OWN_TR1_TUPLE
+// The user didn't tell us, so we need to figure it out.
+
+// We use our own TR1 tuple if we aren't sure the user has an
+// implementation of it already. At this time, libstdc++ 4.0.0+ and
+// MSVC 2010 are the only mainstream standard libraries that come
+// with a TR1 tuple implementation. NVIDIA's CUDA NVCC compiler
+// pretends to be GCC by defining __GNUC__ and friends, but cannot
+// compile GCC's tuple implementation. MSVC 2008 (9.0) provides TR1
+// tuple in a 323 MB Feature Pack download, which we cannot assume the
+// user has. QNX's QCC compiler is a modified GCC but it doesn't
+// support TR1 tuple. libc++ only provides std::tuple, in C++11 mode,
+// and it can be used with some compilers that define __GNUC__.
+# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \
+ && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600
+# define GTEST_ENV_HAS_TR1_TUPLE_ 1
+# endif
+
+// C++11 specifies that <tuple> provides std::tuple. Use that if gtest is used
+// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6
+// can build with clang but need to use gcc4.2's libstdc++).
+# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325)
+# define GTEST_ENV_HAS_STD_TUPLE_ 1
+# endif
+
+# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_
+# define GTEST_USE_OWN_TR1_TUPLE 0
+# else
+# define GTEST_USE_OWN_TR1_TUPLE 1
+# endif
+
+#endif // GTEST_USE_OWN_TR1_TUPLE
+
+// To avoid conditional compilation everywhere, we make it
+// gtest-port.h's responsibility to #include the header implementing
+// tuple.
+#if GTEST_HAS_STD_TUPLE_
+# include <tuple> // IWYU pragma: export
+# define GTEST_TUPLE_NAMESPACE_ ::std
+#endif // GTEST_HAS_STD_TUPLE_
+
+// We include tr1::tuple even if std::tuple is available to define printers for
+// them.
+#if GTEST_HAS_TR1_TUPLE
+# ifndef GTEST_TUPLE_NAMESPACE_
+# define GTEST_TUPLE_NAMESPACE_ ::std::tr1
+# endif // GTEST_TUPLE_NAMESPACE_
+
+# if GTEST_USE_OWN_TR1_TUPLE
+# include "gtest/internal/gtest-tuple.h" // IWYU pragma: export // NOLINT
+# elif GTEST_ENV_HAS_STD_TUPLE_
+# include <tuple>
+// C++11 puts its tuple into the ::std namespace rather than
+// ::std::tr1. gtest expects tuple to live in ::std::tr1, so put it there.
+// This causes undefined behavior, but supported compilers react in
+// the way we intend.
+namespace std {
+namespace tr1 {
+using ::std::get;
+using ::std::make_tuple;
+using ::std::tuple;
+using ::std::tuple_element;
+using ::std::tuple_size;
+}
+}
+
+# elif GTEST_OS_SYMBIAN
+
+// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
+// use STLport's tuple implementation, which unfortunately doesn't
+// work as the copy of STLport distributed with Symbian is incomplete.
+// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to
+// use its own tuple implementation.
+# ifdef BOOST_HAS_TR1_TUPLE
+# undef BOOST_HAS_TR1_TUPLE
+# endif // BOOST_HAS_TR1_TUPLE
+
+// This prevents <boost/tr1/detail/config.hpp>, which defines
+// BOOST_HAS_TR1_TUPLE, from being #included by Boost's <tuple>.
+# define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
+# include <tuple> // IWYU pragma: export // NOLINT
+
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
+// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header. This does
+// not conform to the TR1 spec, which requires the header to be <tuple>.
+
+# if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
+// which is #included by <tr1/tuple>, to not compile when RTTI is
+// disabled. _TR1_FUNCTIONAL is the header guard for
+// <tr1/functional>. Hence the following #define is a hack to prevent
+// <tr1/functional> from being included.
+# define _TR1_FUNCTIONAL 1
+# include <tr1/tuple>
+# undef _TR1_FUNCTIONAL // Allows the user to #include
+ // <tr1/functional> if he chooses to.
+# else
+# include <tr1/tuple> // NOLINT
+# endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+
+# else
+// If the compiler is not GCC 4.0+, we assume the user is using a
+// spec-conforming TR1 implementation.
+# include <tuple> // IWYU pragma: export // NOLINT
+# endif // GTEST_USE_OWN_TR1_TUPLE
+
+#endif // GTEST_HAS_TR1_TUPLE
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+# if GTEST_OS_LINUX && !defined(__ia64__)
+# if GTEST_OS_LINUX_ANDROID
+// On Android, clone() is only available on ARM starting with Gingerbread.
+# if defined(__arm__) && __ANDROID_API__ >= 9
+# define GTEST_HAS_CLONE 1
+# else
+# define GTEST_HAS_CLONE 0
+# endif
+# else
+# define GTEST_HAS_CLONE 1
+# endif
+# else
+# define GTEST_HAS_CLONE 0
+# endif // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#ifndef GTEST_HAS_STREAM_REDIRECTION
+// By default, we assume that stream redirection is supported on all
+// platforms except known mobile ones.
+# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || \
+ GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+# define GTEST_HAS_STREAM_REDIRECTION 0
+# else
+# define GTEST_HAS_STREAM_REDIRECTION 1
+# endif // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier, as
+// abort() in a VC 7.1 application compiled as a GUI program in the debug
+// configuration pops up a dialog window that cannot be suppressed
+// programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+ (GTEST_OS_MAC && !GTEST_OS_IOS) || \
+ (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+ GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
+ GTEST_OS_OPENBSD || GTEST_OS_QNX || GTEST_OS_FREEBSD)
+# define GTEST_HAS_DEATH_TEST 1
+#endif
+
+// We don't support MSVC 7.1 with exceptions disabled now. Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, IBM Visual Age, and HP aCC support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+ defined(__IBMCPP__) || defined(__HP_aCC)
+# define GTEST_HAS_TYPED_TEST 1
+# define GTEST_HAS_TYPED_TEST_P 1
+#endif
+
+// Determines whether to support Combine(). This only makes sense when
+// value-parameterized tests are enabled. The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+# define GTEST_HAS_COMBINE 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+ (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
+
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX
+# define GTEST_CAN_STREAM_RESULTS_ 1
+#endif
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding. This leads to problems with code like:
+//
+// if (gate)
+// ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
+#ifdef __INTEL_COMPILER
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_
+#else
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT
+#endif
+
+// Use this annotation at the end of a struct/class definition to
+// prevent the compiler from optimizing away instances that are never
+// used. This is useful when all interesting logic happens inside the
+// c'tor and / or d'tor. Example:
+//
+// struct Foo {
+// Foo() { ... }
+// } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+#elif defined(__clang__)
+# if __has_attribute(unused)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+# endif
+#endif
+#ifndef GTEST_ATTRIBUTE_UNUSED_
+# define GTEST_ATTRIBUTE_UNUSED_
+#endif
+
+// A macro to disallow operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type)\
+ void operator=(type const &)
+
+// A macro to disallow copy constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
+ type(type const &);\
+ GTEST_DISALLOW_ASSIGN_(type)
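+
+// For example (Widget is a hypothetical class, for illustration only):
+//
+//   class Widget {
+//    public:
+//     Widget() {}
+//
+//    private:
+//     GTEST_DISALLOW_COPY_AND_ASSIGN_(Widget);
+//   };
+//
+//   // Widget w2 = w1;  // Would not compile: the copy c'tor is private.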
+
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro. The macro should be used on function declarations
+// following the argument list:
+//
+// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
+#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
+# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
+#else
+# define GTEST_MUST_USE_RESULT_
+#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
+
+// The MS C++ compiler emits a warning when a conditional expression is
+// compile-time constant. In some contexts this warning is a false positive
+// and needs to be suppressed. Use the following two macros in such cases:
+//
+// GTEST_INTENTIONAL_CONST_COND_PUSH_()
+// while (true) {
+// GTEST_INTENTIONAL_CONST_COND_POP_()
+// }
+# define GTEST_INTENTIONAL_CONST_COND_PUSH_() \
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127)
+# define GTEST_INTENTIONAL_CONST_COND_POP_() \
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling. This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+# define GTEST_HAS_SEH 1
+# else
+// Assume no SEH.
+# define GTEST_HAS_SEH 0
+# endif
+
+#endif // GTEST_HAS_SEH
+
+// GTEST_IS_THREADSAFE must be defined even when the user pre-defines
+// GTEST_HAS_SEH, so it is placed outside the #ifndef block above.
+#define GTEST_IS_THREADSAFE \
+    (GTEST_HAS_MUTEX_AND_THREAD_LOCAL_ \
+     || (GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT) \
+     || GTEST_HAS_PTHREAD)
+
+#ifdef _MSC_VER
+# if GTEST_LINKED_AS_SHARED_LIBRARY
+# define GTEST_API_ __declspec(dllimport)
+# elif GTEST_CREATE_SHARED_LIBRARY
+# define GTEST_API_ __declspec(dllexport)
+# endif
+#elif __GNUC__ >= 4 || defined(__clang__)
+# define GTEST_API_ __attribute__((visibility ("default")))
+#endif // _MSC_VER
+
+#ifndef GTEST_API_
+# define GTEST_API_
+#endif
+
+#ifdef __GNUC__
+// Ask the compiler to never inline a given function.
+# define GTEST_NO_INLINE_ __attribute__((noinline))
+#else
+# define GTEST_NO_INLINE_
+#endif
+
+// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.
+#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION)
+# define GTEST_HAS_CXXABI_H_ 1
+#else
+# define GTEST_HAS_CXXABI_H_ 0
+#endif
+
+// A function level attribute to disable checking for use of uninitialized
+// memory when built with MemorySanitizer.
+#if defined(__clang__)
+# if __has_feature(memory_sanitizer)
+# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ \
+ __attribute__((no_sanitize_memory))
+# else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+# endif // __has_feature(memory_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+#endif // __clang__
+
+// A function level attribute to disable AddressSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(address_sanitizer)
+# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \
+ __attribute__((no_sanitize_address))
+# else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+# endif // __has_feature(address_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+#endif // __clang__
+
+// A function level attribute to disable ThreadSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(thread_sanitizer)
+# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ \
+ __attribute__((no_sanitize_thread))
+# else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+# endif // __has_feature(thread_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+#endif // __clang__
+
+// A function level attribute to disable UndefinedBehaviorSanitizer's (defined)
+// unsigned integer overflow instrumentation.
+#if defined(__clang__)
+# if defined(__has_attribute) && __has_attribute(no_sanitize)
+# define GTEST_ATTRIBUTE_NO_SANITIZE_UNSIGNED_OVERFLOW_ \
+ __attribute__((no_sanitize("unsigned-integer-overflow")))
+# else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_UNSIGNED_OVERFLOW_
+# endif // defined(__has_attribute) && __has_attribute(no_sanitize)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_UNSIGNED_OVERFLOW_
+#endif // __clang__
+
+namespace testing {
+
+class Message;
+
+#if defined(GTEST_TUPLE_NAMESPACE_)
+// Import tuple and friends into the ::testing namespace.
+// It is part of our interface; having them in ::testing allows us to
+// change their types as needed.
+using GTEST_TUPLE_NAMESPACE_::get;
+using GTEST_TUPLE_NAMESPACE_::make_tuple;
+using GTEST_TUPLE_NAMESPACE_::tuple;
+using GTEST_TUPLE_NAMESPACE_::tuple_size;
+using GTEST_TUPLE_NAMESPACE_::tuple_element;
+#endif // defined(GTEST_TUPLE_NAMESPACE_)
+
+namespace internal {
+
+// A secret type that Google Test users don't know about. It has no
+// definition on purpose. Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
+
+// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+// GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == NUM_NAMES,
+// names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+// GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+#if GTEST_LANG_CXX11
+# define GTEST_COMPILE_ASSERT_(expr, msg) static_assert(expr, #msg)
+#else // !GTEST_LANG_CXX11
+template <bool>
+ struct CompileAssert {
+};
+
+# define GTEST_COMPILE_ASSERT_(expr, msg) \
+ typedef ::testing::internal::CompileAssert<(static_cast<bool>(expr))> \
+ msg[static_cast<bool>(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_
+#endif // !GTEST_LANG_CXX11
+
+// Implementation details of GTEST_COMPILE_ASSERT_:
+//
+// (In C++11, we simply use static_assert instead of the following)
+//
+// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1
+// elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+// #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+// does not work, as gcc supports variable-length arrays whose sizes
+// are determined at run-time (this is gcc's extension and not part
+// of the C++ standard). As a result, gcc fails to reject the
+// following code with the simple definition:
+//
+// int foo;
+// GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is
+// // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+// expr is a compile-time constant. (Template arguments must be
+// determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written
+//
+// CompileAssert<bool(expr)>
+//
+// instead, these compilers will refuse to compile
+//
+// GTEST_COMPILE_ASSERT_(5 > 0, some_message);
+//
+// (They seem to think the ">" in "5 > 0" marks the end of the
+// template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+// ((expr) ? 1 : -1).
+//
+// This is to avoid running into a bug in MS VC 7.1, which
+// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h.
+//
+// This template is declared, but intentionally undefined.
+template <typename T1, typename T2>
+struct StaticAssertTypeEqHelper;
+
+template <typename T>
+struct StaticAssertTypeEqHelper<T, T> {
+ enum { value = true };
+};
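+
+// For illustration: StaticAssertTypeEqHelper<int, int>::value compiles
+// (the specialization above provides it), while
+// StaticAssertTypeEqHelper<int, char>::value fails to compile, since the
+// primary template is only declared, never defined.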
+
+// Evaluates to the number of elements in 'array'.
+#define GTEST_ARRAY_SIZE_(array) (sizeof(array) / sizeof(array[0]))
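+//
+// For example (illustrative):
+//
+//   const char* const names[] = { "a", "b", "c" };
+//   GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == 3, wrong_array_size);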
+
+#if GTEST_HAS_GLOBAL_STRING
+typedef ::string string;
+#else
+typedef ::std::string string;
+#endif // GTEST_HAS_GLOBAL_STRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+typedef ::wstring wstring;
+#elif GTEST_HAS_STD_WSTRING
+typedef ::std::wstring wstring;
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// A helper for suppressing warnings on constant condition. It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
+
+// Defines scoped_ptr.
+
+// This implementation of scoped_ptr is PARTIAL - it only contains
+// enough stuff to satisfy Google Test's needs.
+template <typename T>
+class scoped_ptr {
+ public:
+ typedef T element_type;
+
+ explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
+ ~scoped_ptr() { reset(); }
+
+ T& operator*() const { return *ptr_; }
+ T* operator->() const { return ptr_; }
+ T* get() const { return ptr_; }
+
+ T* release() {
+ T* const ptr = ptr_;
+ ptr_ = NULL;
+ return ptr;
+ }
+
+ void reset(T* p = NULL) {
+ if (p != ptr_) {
+ if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type.
+ delete ptr_;
+ }
+ ptr_ = p;
+ }
+ }
+
+ friend void swap(scoped_ptr& a, scoped_ptr& b) {
+ using std::swap;
+ swap(a.ptr_, b.ptr_);
+ }
+
+ private:
+ T* ptr_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
+};
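+
+// A usage sketch (illustrative only):
+//
+//   {
+//     scoped_ptr<int> p(new int(42));
+//     *p += 1;   // Dereferences like a raw pointer.
+//   }            // The int is deleted when p goes out of scope.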
+
+// Defines RE.
+
+// A simple C++ wrapper for <regex.h>. It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+ // A copy constructor is required by the Standard to initialize object
+ // references from r-values.
+ RE(const RE& other) { Init(other.pattern()); }
+
+ // Constructs an RE from a string.
+ RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT
+
+#if GTEST_HAS_GLOBAL_STRING
+
+ RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ RE(const char* regex) { Init(regex); } // NOLINT
+ ~RE();
+
+ // Returns the string representation of the regex.
+ const char* pattern() const { return pattern_; }
+
+ // FullMatch(str, re) returns true iff regular expression re matches
+ // the entire str.
+ // PartialMatch(str, re) returns true iff regular expression re
+ // matches a substring of str (including str itself).
+ //
+ // TODO(wan@google.com): make FullMatch() and PartialMatch() work
+ // when str contains NUL characters.
+ static bool FullMatch(const ::std::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::std::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+
+#if GTEST_HAS_GLOBAL_STRING
+
+ static bool FullMatch(const ::string& str, const RE& re) {
+ return FullMatch(str.c_str(), re);
+ }
+ static bool PartialMatch(const ::string& str, const RE& re) {
+ return PartialMatch(str.c_str(), re);
+ }
+
+#endif // GTEST_HAS_GLOBAL_STRING
+
+ static bool FullMatch(const char* str, const RE& re);
+ static bool PartialMatch(const char* str, const RE& re);
+
+ private:
+ void Init(const char* regex);
+
+ // We use a const char* instead of an std::string, as Google Test used to be
+ // used where std::string is not available. TODO(wan@google.com): change to
+ // std::string.
+ const char* pattern_;
+ bool is_valid_;
+
+#if GTEST_USES_POSIX_RE
+
+ regex_t full_regex_; // For FullMatch().
+ regex_t partial_regex_; // For PartialMatch().
+
+#else // GTEST_USES_SIMPLE_RE
+
+  const char* full_pattern_;  // For FullMatch().
+
+#endif
+
+ GTEST_DISALLOW_ASSIGN_(RE);
+};
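+
+// A usage sketch (illustrative only):
+//
+//   const RE re("a.*z");
+//   RE::FullMatch("abcz", re);         // true: re matches the entire string.
+//   RE::PartialMatch("xxabczxx", re);  // true: re matches a substring.
+//   RE::FullMatch("xxabczxx", re);     // false: re must match the whole string.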
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line);
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,
+ int line);
+
+// Defines logging utilities:
+// GTEST_LOG_(severity) - logs messages at the specified severity level. The
+// message itself is streamed into the macro.
+// LogToStderr() - directs all log messages to stderr.
+// FlushInfoLog() - flushes informational log messages.
+
+enum GTestLogSeverity {
+ GTEST_INFO,
+ GTEST_WARNING,
+ GTEST_ERROR,
+ GTEST_FATAL
+};
+
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+ GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+ // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+ ~GTestLog();
+
+ ::std::ostream& GetStream() { return ::std::cerr; }
+
+ private:
+ const GTestLogSeverity severity_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog);
+};
+
+#if !defined(GTEST_LOG_)
+
+# define GTEST_LOG_(severity) \
+ ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \
+ __FILE__, __LINE__).GetStream()
+
+inline void LogToStderr() {}
+inline void FlushInfoLog() { fflush(NULL); }
+
+#endif // !defined(GTEST_LOG_)
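+
+// For example (the message and severity are illustrative):
+//
+//   GTEST_LOG_(WARNING) << "Unexpected argv count: " << argc;
+//
+// The temporary GTestLog formats the severity, streams the message to
+// stderr, and appends a newline when it goes out of scope; FATAL also
+// aborts the program.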
+
+#if !defined(GTEST_CHECK_)
+// INTERNAL IMPLEMENTATION - DO NOT USE.
+//
+// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition
+// is not satisfied.
+// Synopsis:
+// GTEST_CHECK_(boolean_condition);
+// or
+// GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+// This checks the condition and if the condition is not satisfied
+// it prints message about the condition violation, including the
+// condition itself, plus additional message streamed into it, if any,
+// and then it aborts the program. It does so regardless of whether the
+// binary is built in debug mode or not.
+# define GTEST_CHECK_(condition) \
+ GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+ if (::testing::internal::IsTrue(condition)) \
+ ; \
+ else \
+ GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+#endif // !defined(GTEST_CHECK_)
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success). Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+ if (const int gtest_error = (posix_call)) \
+    GTEST_LOG_(FATAL) << #posix_call << " failed with error " \
+ << gtest_error
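+
+// Usage sketches for the two asserts above (ptr, mu, and need_lock are
+// hypothetical). Note the braces around GTEST_CHECK_POSIX_SUCCESS_ when it
+// forms the whole body of an 'if', per the limitation described above:
+//
+//   GTEST_CHECK_(ptr != NULL) << "ptr must be set before this point.";
+//   if (need_lock) {
+//     GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mu));
+//   }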
+
+#if GTEST_HAS_STD_MOVE_
+using std::move;
+#else // GTEST_HAS_STD_MOVE_
+template <typename T>
+const T& move(const T& t) {
+ return t;
+}
+#endif // GTEST_HAS_STD_MOVE_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Use ImplicitCast_ as a safe version of static_cast for upcasting in
+// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a
+// const Foo*). When you use ImplicitCast_, the compiler checks that
+// the cast is safe. Such explicit ImplicitCast_s are necessary in
+// surprisingly many situations where C++ demands an exact type match
+// instead of an argument type convertible to a target type.
+//
+// The syntax for using ImplicitCast_ is the same as for static_cast:
+//
+// ImplicitCast_<ToType>(expr)
+//
+// ImplicitCast_ would have been part of the C++ standard library,
+// but the proposal was submitted too late. It will probably make
+// its way into the language in the future.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., implicit_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To>
+inline To ImplicitCast_(To x) { return x; }
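+
+// For example, assuming hypothetical classes Derived and Base, where
+// Derived inherits from Base:
+//
+//   Derived* d = GetDerived();          // GetDerived() is hypothetical.
+//   Base* b = ImplicitCast_<Base*>(d);  // OK: Derived* converts implicitly.
+//   // ImplicitCast_<Derived*>(b);      // Would not compile: there is no
+//   //                                  // implicit Base* -> Derived*.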
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts
+// always succeed. When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo? It
+// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus,
+// when you downcast, you should use this macro. In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not). In normal mode, we do the efficient static_cast<>
+// instead. Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+// This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI (e.g., code like this):
+//   if (dynamic_cast<Subclass1*>(foo)) HandleASubclass1Object(foo);
+//   if (dynamic_cast<Subclass2*>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+//
+// This relatively ugly name is intentional. It prevents clashes with
+// similar functions users may have (e.g., down_cast). The internal
+// namespace alone is not enough because the function can be found by ADL.
+template<typename To, typename From> // use like this: DownCast_<T*>(foo);
+inline To DownCast_(From* f) { // so we only accept pointers
+ // Ensures that To is a sub-type of From *. This test is here only
+ // for compile-time type checking, and has no overhead in an
+ // optimized build at run-time, as it will be optimized away
+ // completely.
+ GTEST_INTENTIONAL_CONST_COND_PUSH_()
+ if (false) {
+ GTEST_INTENTIONAL_CONST_COND_POP_()
+ const To to = NULL;
+ ::testing::internal::ImplicitCast_<From*>(to);
+ }
+
+#if GTEST_HAS_RTTI
+ // RTTI: debug mode only!
+ GTEST_CHECK_(f == NULL || dynamic_cast<To>(f) != NULL);
+#endif
+ return static_cast<To>(f);
+}
+
+// Downcasts the pointer of type Base to Derived.
+// Derived must be a subclass of Base. The parameter MUST
+// point to a class of type Derived, not any subclass of it.
+// When RTTI is available, the function performs a runtime
+// check to enforce this.
+template <class Derived, class Base>
+Derived* CheckedDowncastToActualType(Base* base) {
+#if GTEST_HAS_RTTI
+ GTEST_CHECK_(typeid(*base) == typeid(Derived));
+#endif
+
+#if GTEST_HAS_DOWNCAST_
+ return ::down_cast<Derived*>(base);
+#elif GTEST_HAS_RTTI
+ return dynamic_cast<Derived*>(base); // NOLINT
+#else
+ return static_cast<Derived*>(base); // Poor man's downcast.
+#endif
+}
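+
+// A usage sketch for the two downcast helpers, again with hypothetical
+// Base/Derived classes where base actually points at a Derived:
+//
+//   Base* base = new Derived;
+//   Derived* d1 = DownCast_<Derived*>(base);
+//   Derived* d2 = CheckedDowncastToActualType<Derived>(base);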
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Defines the stderr capturer:
+// CaptureStdout - starts capturing stdout.
+// GetCapturedStdout - stops capturing stdout and returns the captured string.
+// CaptureStderr - starts capturing stderr.
+// GetCapturedStderr - stops capturing stderr and returns the captured string.
+//
+GTEST_API_ void CaptureStdout();
+GTEST_API_ std::string GetCapturedStdout();
+GTEST_API_ void CaptureStderr();
+GTEST_API_ std::string GetCapturedStderr();
+
+#endif // GTEST_HAS_STREAM_REDIRECTION
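+
+// For example:
+//
+//   CaptureStdout();
+//   printf("hello\n");
+//   const std::string output = GetCapturedStdout();  // "hello\n"
+//
+// Each Capture* call is expected to be paired with the matching
+// GetCaptured* call before capturing that stream again.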
+
+// Returns a path to temporary directory.
+GTEST_API_ std::string TempDir();
+
+// Returns the size (in bytes) of a file.
+GTEST_API_ size_t GetFileSize(FILE* file);
+
+// Reads the entire content of a file as a string.
+GTEST_API_ std::string ReadEntireFile(FILE* file);
+
+// All command line arguments.
+GTEST_API_ const ::std::vector<testing::internal::string>& GetArgvs();
+
+#if GTEST_HAS_DEATH_TEST
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs();
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>*
+ new_argvs);
+
+
+#endif // GTEST_HAS_DEATH_TEST
+
+// Defines synchronization primitives.
+#if GTEST_IS_THREADSAFE
+# if GTEST_HAS_PTHREAD
+// Sleeps for (roughly) n milliseconds. This function is only for testing
+// Google Test's own constructs. Don't use it in user tests, either
+// directly or indirectly.
+inline void SleepMilliseconds(int n) {
+ const timespec time = {
+ 0, // 0 seconds.
+ n * 1000L * 1000L, // And n ms.
+ };
+ nanosleep(&time, NULL);
+}
+# endif // GTEST_HAS_PTHREAD
+
+# if GTEST_HAS_NOTIFICATION_
+// Notification has already been imported into the namespace.
+// Nothing to do here.
+
+# elif GTEST_HAS_PTHREAD
+// Allows a controller thread to pause execution of newly created
+// threads until notified. Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class Notification {
+ public:
+ Notification() : notified_(false) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+ }
+ ~Notification() {
+ pthread_mutex_destroy(&mutex_);
+ }
+
+ // Notifies all threads created with this notification to start. Must
+ // be called from the controller thread.
+ void Notify() {
+ pthread_mutex_lock(&mutex_);
+ notified_ = true;
+ pthread_mutex_unlock(&mutex_);
+ }
+
+ // Blocks until the controller thread notifies. Must be called from a test
+ // thread.
+ void WaitForNotification() {
+ for (;;) {
+ pthread_mutex_lock(&mutex_);
+ const bool notified = notified_;
+ pthread_mutex_unlock(&mutex_);
+ if (notified)
+ break;
+ SleepMilliseconds(10);
+ }
+ }
+
+ private:
+ pthread_mutex_t mutex_;
+ bool notified_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+
+# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+
+GTEST_API_ void SleepMilliseconds(int n);
+
+// Provides leak-safe Windows kernel handle ownership.
+// Used in death tests and in threading support.
+class GTEST_API_ AutoHandle {
+ public:
+ // Assume that Win32 HANDLE type is equivalent to void*. Doing so allows us to
+ // avoid including <windows.h> in this header file. Including <windows.h> is
+ // undesirable because it defines a lot of symbols and macros that tend to
+ // conflict with client code. This assumption is verified by
+ // WindowsTypesTest.HANDLEIsVoidStar.
+ typedef void* Handle;
+ AutoHandle();
+ explicit AutoHandle(Handle handle);
+
+ ~AutoHandle();
+
+ Handle Get() const;
+ void Reset();
+ void Reset(Handle handle);
+
+ private:
+ // Returns true iff the handle is a valid handle object that can be closed.
+ bool IsCloseable() const;
+
+ Handle handle_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
+
+// Allows a controller thread to pause execution of newly created
+// threads until notified. Instances of this class must be created
+// and destroyed in the controller thread.
+//
+// This class is only for testing Google Test's own constructs. Do not
+// use it in user tests, either directly or indirectly.
+class GTEST_API_ Notification {
+ public:
+ Notification();
+ void Notify();
+ void WaitForNotification();
+
+ private:
+ AutoHandle event_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
+};
+# endif // GTEST_HAS_NOTIFICATION_
+
+// On MinGW, we can have both GTEST_OS_WINDOWS and GTEST_HAS_PTHREAD
+// defined, but we don't want to use MinGW's pthreads implementation, which
+// has conformance problems with some versions of the POSIX standard.
+# if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW
+
+// As a C-function, ThreadFuncWithCLinkage cannot be templated itself.
+// Consequently, it cannot select a correct instantiation of ThreadWithParam
+// in order to call its Run(). Introducing ThreadWithParamBase as a
+// non-templated base class for ThreadWithParam allows us to bypass this
+// problem.
+class ThreadWithParamBase {
+ public:
+ virtual ~ThreadWithParamBase() {}
+ virtual void Run() = 0;
+};
+
+// pthread_create() accepts a pointer to a function type with C linkage.
+// According to the Standard (7.5/1), function types with different linkages
+// are different even if they are otherwise identical. Some compilers (for
+// example, SunStudio) treat them as different types. Since class methods
+// cannot be defined with C-linkage we need to define a free C-function to
+// pass into pthread_create().
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) {
+ static_cast<ThreadWithParamBase*>(thread)->Run();
+ return NULL;
+}
+
+// Helper class for testing Google Test's multi-threading constructs.
+// To use it, write:
+//
+// void ThreadFunc(int param) { /* Do things with param */ }
+// Notification thread_can_start;
+// ...
+// // The thread_can_start parameter is optional; you can supply NULL.
+// ThreadWithParam<int> thread(&ThreadFunc, 5, &thread_can_start);
+// thread_can_start.Notify();
+//
+// These classes are only for testing Google Test's own constructs. Do
+// not use them in user tests, either directly or indirectly.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+ typedef void UserThreadFunc(T);
+
+ ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
+ : func_(func),
+ param_(param),
+ thread_can_start_(thread_can_start),
+ finished_(false) {
+ ThreadWithParamBase* const base = this;
+ // The thread can be created only after all fields except thread_
+ // have been initialized.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base));
+ }
+ ~ThreadWithParam() { Join(); }
+
+ void Join() {
+ if (!finished_) {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0));
+ finished_ = true;
+ }
+ }
+
+ virtual void Run() {
+ if (thread_can_start_ != NULL)
+ thread_can_start_->WaitForNotification();
+ func_(param_);
+ }
+
+ private:
+ UserThreadFunc* const func_; // User-supplied thread function.
+ const T param_; // User-supplied parameter to the thread function.
+ // When non-NULL, used to block execution until the controller thread
+ // notifies.
+ Notification* const thread_can_start_;
+ bool finished_; // true iff we know that the thread function has finished.
+ pthread_t thread_; // The native thread object.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+# endif  // GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW
+
+# if GTEST_HAS_MUTEX_AND_THREAD_LOCAL_
+// Mutex and ThreadLocal have already been imported into the namespace.
+// Nothing to do here.
+
+# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+
+// Mutex implements mutex on Windows platforms. It is used in conjunction
+// with class MutexLock:
+//
+// Mutex mutex;
+// ...
+// MutexLock lock(&mutex); // Acquires the mutex and releases it at the
+// // end of the current scope.
+//
+// A static Mutex *must* be defined or declared using one of the following
+// macros:
+// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex);
+// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex);
+//
+// (A non-static Mutex is defined/declared in the usual way).
+class GTEST_API_ Mutex {
+ public:
+ enum MutexType { kStatic = 0, kDynamic = 1 };
+  // We rely on kStaticMutex being 0, as that is what the linker initializes
+  // type_ to in static mutexes. critical_section_ will be initialized lazily
+ // in ThreadSafeLazyInit().
+ enum StaticConstructorSelector { kStaticMutex = 0 };
+
+ // This constructor intentionally does nothing. It relies on type_ being
+ // statically initialized to 0 (effectively setting it to kStatic) and on
+ // ThreadSafeLazyInit() to lazily initialize the rest of the members.
+ explicit Mutex(StaticConstructorSelector /*dummy*/) {}
+
+ Mutex();
+ ~Mutex();
+
+ void Lock();
+
+ void Unlock();
+
+ // Does nothing if the current thread holds the mutex. Otherwise, crashes
+ // with high probability.
+ void AssertHeld();
+
+ private:
+ // Initializes owner_thread_id_ and critical_section_ in static mutexes.
+ void ThreadSafeLazyInit();
+
+ // Per http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx,
+ // we assume that 0 is an invalid value for thread IDs.
+ unsigned int owner_thread_id_;
+
+ // For static mutexes, we rely on these members being initialized to zeros
+ // by the linker.
+ MutexType type_;
+ long critical_section_init_phase_; // NOLINT
+ _RTL_CRITICAL_SECTION* critical_section_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::Mutex mutex
+
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+ ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex)
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent against
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)". Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(Mutex* mutex)
+ : mutex_(mutex) { mutex_->Lock(); }
+
+ ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+ Mutex* const mutex_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
+
+// Base class for ValueHolder<T>. Allows a caller to hold and delete a value
+// without knowing its type.
+class ThreadLocalValueHolderBase {
+ public:
+ virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Provides a way for a thread to send notifications to a ThreadLocal
+// regardless of its parameter type.
+class ThreadLocalBase {
+ public:
+ // Creates a new ValueHolder<T> object holding a default value passed to
+ // this ThreadLocal<T>'s constructor and returns it. It is the caller's
+ // responsibility not to call this when the ThreadLocal<T> instance already
+ // has a value on the current thread.
+ virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const = 0;
+
+ protected:
+ ThreadLocalBase() {}
+ virtual ~ThreadLocalBase() {}
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocalBase);
+};
+
+// Maps a thread to a set of ThreadLocals that have values instantiated on that
+// thread and notifies them when the thread exits. A ThreadLocal instance is
+// expected to persist until all threads it has values on have terminated.
+class GTEST_API_ ThreadLocalRegistry {
+ public:
+ // Registers thread_local_instance as having value on the current thread.
+ // Returns a value that can be used to identify the thread from other threads.
+ static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
+ const ThreadLocalBase* thread_local_instance);
+
+ // Invoked when a ThreadLocal instance is destroyed.
+ static void OnThreadLocalDestroyed(
+ const ThreadLocalBase* thread_local_instance);
+};
+
+class GTEST_API_ ThreadWithParamBase {
+ public:
+ void Join();
+
+ protected:
+ class Runnable {
+ public:
+ virtual ~Runnable() {}
+ virtual void Run() = 0;
+ };
+
+ ThreadWithParamBase(Runnable *runnable, Notification* thread_can_start);
+ virtual ~ThreadWithParamBase();
+
+ private:
+ AutoHandle thread_;
+};
+
+// Helper class for testing Google Test's multi-threading constructs.
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+ typedef void UserThreadFunc(T);
+
+ ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
+ : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) {
+ }
+ virtual ~ThreadWithParam() {}
+
+ private:
+ class RunnableImpl : public Runnable {
+ public:
+ RunnableImpl(UserThreadFunc* func, T param)
+ : func_(func),
+ param_(param) {
+ }
+ virtual ~RunnableImpl() {}
+ virtual void Run() {
+ func_(param_);
+ }
+
+ private:
+ UserThreadFunc* const func_;
+ const T param_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(RunnableImpl);
+ };
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// Implements thread-local storage on Windows systems.
+//
+// // Thread 1
+// ThreadLocal<int> tl(100); // 100 is the default value for each thread.
+//
+// // Thread 2
+// tl.set(150); // Changes the value for thread 2 only.
+// EXPECT_EQ(150, tl.get());
+//
+// // Thread 1
+// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value.
+// tl.set(200);
+// EXPECT_EQ(200, tl.get());
+//
+// The template type argument T must have a public copy constructor.
+// In addition, the default ThreadLocal constructor requires T to have
+// a public default constructor.
+//
+// The users of a ThreadLocal instance have to make sure that all but one of
+// the threads (including the main one) using that instance have exited before
+// destroying it. Otherwise, the per-thread objects managed for them by the
+// ThreadLocal instance are not guaranteed to be destroyed on all platforms.
+//
+// Google Test only uses global ThreadLocal objects. That means they
+// will die after main() has returned. Therefore, no per-thread
+// object managed by Google Test will be leaked as long as all threads
+// using Google Test have exited when main() returns.
+template <typename T>
+class ThreadLocal : public ThreadLocalBase {
+ public:
+ ThreadLocal() : default_factory_(new DefaultValueHolderFactory()) {}
+ explicit ThreadLocal(const T& value)
+ : default_factory_(new InstanceValueHolderFactory(value)) {}
+
+ ~ThreadLocal() { ThreadLocalRegistry::OnThreadLocalDestroyed(this); }
+
+ T* pointer() { return GetOrCreateValue(); }
+ const T* pointer() const { return GetOrCreateValue(); }
+ const T& get() const { return *pointer(); }
+ void set(const T& value) { *pointer() = value; }
+
+ private:
+ // Holds a value of T. Can be deleted via its base class without the caller
+ // knowing the type of T.
+ class ValueHolder : public ThreadLocalValueHolderBase {
+ public:
+ ValueHolder() : value_() {}
+ explicit ValueHolder(const T& value) : value_(value) {}
+
+ T* pointer() { return &value_; }
+
+ private:
+ T value_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+ };
+
+
+ T* GetOrCreateValue() const {
+ return static_cast<ValueHolder*>(
+ ThreadLocalRegistry::GetValueOnCurrentThread(this))->pointer();
+ }
+
+ virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const {
+ return default_factory_->MakeNewHolder();
+ }
+
+ class ValueHolderFactory {
+ public:
+ ValueHolderFactory() {}
+ virtual ~ValueHolderFactory() {}
+ virtual ValueHolder* MakeNewHolder() const = 0;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory);
+ };
+
+ class DefaultValueHolderFactory : public ValueHolderFactory {
+ public:
+ DefaultValueHolderFactory() {}
+ virtual ValueHolder* MakeNewHolder() const { return new ValueHolder(); }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory);
+ };
+
+ class InstanceValueHolderFactory : public ValueHolderFactory {
+ public:
+ explicit InstanceValueHolderFactory(const T& value) : value_(value) {}
+ virtual ValueHolder* MakeNewHolder() const {
+ return new ValueHolder(value_);
+ }
+
+ private:
+ const T value_; // The value for each thread.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory);
+ };
+
+ scoped_ptr<ValueHolderFactory> default_factory_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# elif GTEST_HAS_PTHREAD
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms.
+class MutexBase {
+ public:
+ // Acquires this mutex.
+ void Lock() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+ owner_ = pthread_self();
+ has_owner_ = true;
+ }
+
+ // Releases this mutex.
+ void Unlock() {
+ // Since the lock is being released the owner_ field should no longer be
+ // considered valid. We don't protect writing to has_owner_ here, as it's
+ // the caller's responsibility to ensure that the current thread holds the
+ // mutex when this is called.
+ has_owner_ = false;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+ }
+
+ // Does nothing if the current thread holds the mutex. Otherwise, crashes
+ // with high probability.
+ void AssertHeld() const {
+ GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))
+ << "The current thread is not holding the mutex @" << this;
+ }
+
+ // A static mutex may be used before main() is entered. It may even
+ // be used before the dynamic initialization stage. Therefore we
+ // must be able to initialize a static mutex object at link time.
+ // This means MutexBase has to be a POD and its member variables
+ // have to be public.
+ public:
+ pthread_mutex_t mutex_; // The underlying pthread mutex.
+ // has_owner_ indicates whether the owner_ field below contains a valid thread
+ // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All
+ // accesses to the owner_ field should be protected by a check of this field.
+ // An alternative might be to memset() owner_ to all zeros, but there's no
+ // guarantee that a zero'd pthread_t is necessarily invalid or even different
+ // from pthread_self().
+ bool has_owner_;
+ pthread_t owner_; // The thread holding the mutex.
+};
+
+// Forward-declares a static mutex.
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+ ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false, pthread_t() }
+
+// The Mutex class can only be used for mutexes created at runtime. It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+ Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+ has_owner_ = false;
+ }
+ ~Mutex() {
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+ }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent against
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)". Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(MutexBase* mutex)
+ : mutex_(mutex) { mutex_->Lock(); }
+
+ ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+ MutexBase* const mutex_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
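+
+// A usage sketch, mirroring the Windows section above (g_log_mutex is a
+// hypothetical example name):
+//
+//   GTEST_DEFINE_STATIC_MUTEX_(g_log_mutex);  // At namespace scope.
+//   ...
+//   {
+//     MutexLock lock(&g_log_mutex);  // Released at the end of the scope.
+//     ...
+//   }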
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage. Therefore it cannot be templatized to access
+// ThreadLocal<T>. Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+ virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+ delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
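+
+// ThreadLocal<T> below wraps the usual pthreads TLS pattern, roughly
+// (error handling elided; holder is hypothetical):
+//
+//   pthread_key_t key;
+//   pthread_key_create(&key, &DeleteThreadLocalValue);  // Once per instance.
+//   pthread_setspecific(key, holder);    // Sets this thread's value.
+//   void* v = pthread_getspecific(key);  // Reads this thread's value.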
+
+// Implements thread-local storage on pthreads-based systems.
+template <typename T>
+class ThreadLocal {
+ public:
+ ThreadLocal()
+ : key_(CreateKey()), default_factory_(new DefaultValueHolderFactory()) {}
+ explicit ThreadLocal(const T& value)
+ : key_(CreateKey()),
+ default_factory_(new InstanceValueHolderFactory(value)) {}
+
+ ~ThreadLocal() {
+ // Destroys the managed object for the current thread, if any.
+ DeleteThreadLocalValue(pthread_getspecific(key_));
+
+ // Releases resources associated with the key. This will *not*
+ // delete managed objects for other threads.
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
+ }
+
+ T* pointer() { return GetOrCreateValue(); }
+ const T* pointer() const { return GetOrCreateValue(); }
+ const T& get() const { return *pointer(); }
+ void set(const T& value) { *pointer() = value; }
+
+ private:
+ // Holds a value of type T.
+ class ValueHolder : public ThreadLocalValueHolderBase {
+ public:
+ ValueHolder() : value_() {}
+ explicit ValueHolder(const T& value) : value_(value) {}
+
+ T* pointer() { return &value_; }
+
+ private:
+ T value_;
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+ };
+
+ static pthread_key_t CreateKey() {
+ pthread_key_t key;
+ // When a thread exits, DeleteThreadLocalValue() will be called on
+ // the object managed for that thread.
+ GTEST_CHECK_POSIX_SUCCESS_(
+ pthread_key_create(&key, &DeleteThreadLocalValue));
+ return key;
+ }
+
+ T* GetOrCreateValue() const {
+ ThreadLocalValueHolderBase* const holder =
+ static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
+ if (holder != NULL) {
+ return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
+ }
+
+ ValueHolder* const new_holder = default_factory_->MakeNewHolder();
+ ThreadLocalValueHolderBase* const holder_base = new_holder;
+ GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
+ return new_holder->pointer();
+ }
+
+ class ValueHolderFactory {
+ public:
+ ValueHolderFactory() {}
+ virtual ~ValueHolderFactory() {}
+ virtual ValueHolder* MakeNewHolder() const = 0;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolderFactory);
+ };
+
+ class DefaultValueHolderFactory : public ValueHolderFactory {
+ public:
+ DefaultValueHolderFactory() {}
+ virtual ValueHolder* MakeNewHolder() const { return new ValueHolder(); }
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultValueHolderFactory);
+ };
+
+ class InstanceValueHolderFactory : public ValueHolderFactory {
+ public:
+ explicit InstanceValueHolderFactory(const T& value) : value_(value) {}
+ virtual ValueHolder* MakeNewHolder() const {
+ return new ValueHolder(value_);
+ }
+
+ private:
+ const T value_; // The value for each thread.
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(InstanceValueHolderFactory);
+ };
+
+ // A key pthreads uses for looking up per-thread values.
+ const pthread_key_t key_;
+ scoped_ptr<ValueHolderFactory> default_factory_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# endif // GTEST_HAS_MUTEX_AND_THREAD_LOCAL_
+
+#else // GTEST_IS_THREADSAFE
+
+// A dummy implementation of synchronization primitives (mutex, lock,
+// and thread-local variable). Necessary for compiling Google Test where
+// mutex is not supported - using Google Test in multiple threads is not
+// supported on such platforms.
+
+class Mutex {
+ public:
+ Mutex() {}
+ void Lock() {}
+ void Unlock() {}
+ void AssertHeld() const {}
+};
+
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+ extern ::testing::internal::Mutex mutex
+
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent against
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)". Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+ explicit GTestMutexLock(Mutex*) {} // NOLINT
+};
+
+typedef GTestMutexLock MutexLock;
+
+template <typename T>
+class ThreadLocal {
+ public:
+ ThreadLocal() : value_() {}
+ explicit ThreadLocal(const T& value) : value_(value) {}
+ T* pointer() { return &value_; }
+ const T* pointer() const { return &value_; }
+ const T& get() const { return value_; }
+ void set(const T& value) { value_ = value; }
+ private:
+ T value_;
+};
+
+#endif // GTEST_IS_THREADSAFE
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+GTEST_API_ size_t GetThreadCount();
+
+// Passing non-POD classes through ellipsis (...) crashes the ARM
+// compiler and generates a warning in Sun Studio. The Nokia Symbian
+// and the IBM XL C/C++ compiler try to instantiate a copy constructor
+// for objects passed through ellipsis (...), failing for uncopyable
+// objects. We define this to ensure that only POD is passed through
+// ellipsis on these systems.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_ELLIPSIS_NEEDS_POD_ 1
+#else
+# define GTEST_CAN_COMPARE_NULL 1
+#endif
+
+// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
+// const T& and const T* in a function template. These compilers
+// _can_ decide between class template specializations for T and T*,
+// so a tr1::type_traits-like is_pointer works.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
+# define GTEST_NEEDS_IS_POINTER_ 1
+#endif
+
+template <bool bool_value>
+struct bool_constant {
+ typedef bool_constant<bool_value> type;
+ static const bool value = bool_value;
+};
+template <bool bool_value> const bool bool_constant<bool_value>::value;
+
+typedef bool_constant<false> false_type;
+typedef bool_constant<true> true_type;
+
+template <typename T>
+struct is_pointer : public false_type {};
+
+template <typename T>
+struct is_pointer<T*> : public true_type {};
+
+template <typename Iterator>
+struct IteratorTraits {
+ typedef typename Iterator::value_type value_type;
+};
+
+template <typename T>
+struct IteratorTraits<T*> {
+ typedef T value_type;
+};
+
+template <typename T>
+struct IteratorTraits<const T*> {
+ typedef T value_type;
+};
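+
+// These traits are evaluated at compile time. For instance:
+//
+//   is_pointer<int>::value                                   // false
+//   is_pointer<int*>::value                                  // true
+//   IteratorTraits<const char*>::value_type                  // char
+//   IteratorTraits<std::vector<int>::iterator>::value_type   // int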
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_SEP_ "\\"
+# define GTEST_HAS_ALT_PATH_SEP_ 1
+// The biggest signed integer type the compiler supports.
+typedef __int64 BiggestInt;
+#else
+# define GTEST_PATH_SEP_ "/"
+# define GTEST_HAS_ALT_PATH_SEP_ 0
+typedef long long BiggestInt; // NOLINT
+#endif // GTEST_OS_WINDOWS
+
+// Utilities for char.
+
+// isspace(int ch) and friends accept an unsigned char or EOF. char
+// may be signed, depending on the compiler (or compiler flags).
+// Therefore we need to cast a char to unsigned char before calling
+// isspace(), etc.
+
+inline bool IsAlpha(char ch) {
+ return isalpha(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsAlNum(char ch) {
+ return isalnum(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsDigit(char ch) {
+ return isdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsLower(char ch) {
+ return islower(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsSpace(char ch) {
+ return isspace(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsUpper(char ch) {
+ return isupper(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(char ch) {
+ return isxdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(wchar_t ch) {
+ const unsigned char low_byte = static_cast<unsigned char>(ch);
+ return ch == low_byte && isxdigit(low_byte) != 0;
+}
+
+inline char ToLower(char ch) {
+ return static_cast<char>(tolower(static_cast<unsigned char>(ch)));
+}
+inline char ToUpper(char ch) {
+ return static_cast<char>(toupper(static_cast<unsigned char>(ch)));
+}
+
+inline std::string StripTrailingSpaces(std::string str) {
+ std::string::iterator it = str.end();
+ while (it != str.begin() && IsSpace(*--it))
+ it = str.erase(it);
+ return str;
+}
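+
+// For example:
+//
+//   IsDigit('7');                   // true
+//   IsXDigit(L'\x100');             // false: the high byte is non-zero.
+//   ToUpper('a');                   // 'A'
+//   StripTrailingSpaces("ab \t ");  // "ab"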
+
+// The testing::internal::posix namespace holds wrappers for common
+// POSIX functions. These wrappers hide the differences between
+// Windows/MSVC and POSIX systems. Since some compilers define these
+// standard functions as macros, the wrapper cannot have the same name
+// as the wrapped function.
+
+namespace posix {
+
+// Functions with a different name on Windows.
+
+#if GTEST_OS_WINDOWS
+
+typedef struct _stat StatStruct;
+
+# ifdef __BORLANDC__
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+# else // !__BORLANDC__
+# if GTEST_OS_WINDOWS_MOBILE
+inline int IsATTY(int /* fd */) { return 0; }
+# else
+inline int IsATTY(int fd) { return _isatty(fd); }
+# endif // GTEST_OS_WINDOWS_MOBILE
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return _stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return _strdup(src); }
+# endif // __BORLANDC__
+
+# if GTEST_OS_WINDOWS_MOBILE
+inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
+// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
+// time and thus not defined there.
+# else
+inline int FileNo(FILE* file) { return _fileno(file); }
+inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
+inline int RmDir(const char* dir) { return _rmdir(dir); }
+inline bool IsDir(const StatStruct& st) {
+ return (_S_IFDIR & st.st_mode) != 0;
+}
+# endif // GTEST_OS_WINDOWS_MOBILE
+
+#else
+
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+ return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#endif // GTEST_OS_WINDOWS
+
+// Functions deprecated by MSVC 8.0.
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996 /* deprecated function */)
+
+inline const char* StrNCpy(char* dest, const char* src, size_t n) {
+ return strncpy(dest, src, n);
+}
+
+// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
+// StrError() aren't needed on Windows CE at this time and thus not
+// defined there.
+
+#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+inline int ChDir(const char* dir) { return chdir(dir); }
+#endif
+inline FILE* FOpen(const char* path, const char* mode) {
+ return fopen(path, mode);
+}
+#if !GTEST_OS_WINDOWS_MOBILE
+inline FILE *FReopen(const char* path, const char* mode, FILE* stream) {
+ return freopen(path, mode, stream);
+}
+inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
+#endif
+inline int FClose(FILE* fp) { return fclose(fp); }
+#if !GTEST_OS_WINDOWS_MOBILE
+inline int Read(int fd, void* buf, unsigned int count) {
+ return static_cast<int>(read(fd, buf, count));
+}
+inline int Write(int fd, const void* buf, unsigned int count) {
+ return static_cast<int>(write(fd, buf, count));
+}
+inline int Close(int fd) { return close(fd); }
+inline const char* StrError(int errnum) { return strerror(errnum); }
+#endif
+inline const char* GetEnv(const char* name) {
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+ // We are on Windows CE, which has no environment variables.
+ static_cast<void>(name); // To prevent 'unused argument' warning.
+ return NULL;
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
+ // Environment variables which we programmatically clear will be set to the
+ // empty string rather than unset (NULL). Handle that case.
+ const char* const env = getenv(name);
+ return (env != NULL && env[0] != '\0') ? env : NULL;
+#else
+ return getenv(name);
+#endif
+}
+
+GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Windows CE has no C library. The abort() function is used in
+// several places in Google Test. This implementation provides a reasonable
+// imitation of standard behaviour.
+void Abort();
+#else
+inline void Abort() { abort(); }
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+} // namespace posix
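+
+// A usage sketch for the wrappers (spelled identically on all platforms
+// where they are defined):
+//
+//   posix::StatStruct st;
+//   if (posix::Stat("some/dir", &st) == 0 && posix::IsDir(st)) { ... }
+//   const char* term = posix::GetEnv("TERM");  // NULL when unset.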
+
+// MSVC "deprecates" snprintf and issues warnings wherever it is used. In
+// order to avoid these warnings, we need to use _snprintf or _snprintf_s on
+// MSVC-based platforms. We map the GTEST_SNPRINTF_ macro to the appropriate
+// function in order to achieve that. We use macro definition here because
+// snprintf is a variadic function.
+#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+// MSVC 2005 and above support variadic macros.
+# define GTEST_SNPRINTF_(buffer, size, format, ...) \
+ _snprintf_s(buffer, size, size, format, __VA_ARGS__)
+#elif defined(_MSC_VER)
+// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't
+// complain about _snprintf.
+# define GTEST_SNPRINTF_ _snprintf
+#else
+# define GTEST_SNPRINTF_ snprintf
+#endif
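+
+// For example (buffer and n are hypothetical; the underlying functions
+// differ in how they report truncation):
+//
+//   char buffer[32];
+//   GTEST_SNPRINTF_(buffer, sizeof(buffer), "%d tests", n);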
+
+// The maximum number a BiggestInt can represent. This definition
+// works no matter BiggestInt is represented in one's complement or
+// two's complement.
+//
+// We cannot rely on numeric_limits in STL, as __int64 and long long
+// are not part of standard C++ and numeric_limits doesn't need to be
+// defined for them.
+const BiggestInt kMaxBiggestInt =
+ ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));
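+
+// For a 64-bit two's-complement BiggestInt, for instance, this evaluates
+// to 2^63 - 1, i.e. 0x7FFFFFFFFFFFFFFF.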
+
+// This template class serves as a compile-time function from size to
+// type. It maps a size in bytes to a primitive type with that
+// size. e.g.
+//
+// TypeWithSize<4>::UInt
+//
+// is typedef-ed to be unsigned int (unsigned integer made up of 4
+// bytes).
+//
+// Such functionality should belong to STL, but I cannot find it
+// there.
+//
+// Google Test uses this class in the implementation of floating-point
+// comparison.
+//
+// For now it only handles UInt (unsigned int) as that's all Google Test
+// needs. Other types can be easily added in the future if need
+// arises.
+template <size_t size>
+class TypeWithSize {
+ public:
+ // This prevents the user from using TypeWithSize<N> with incorrect
+ // values of N.
+ typedef void UInt;
+};
+
+// The specialization for size 4.
+template <>
+class TypeWithSize<4> {
+ public:
+ // unsigned int has size 4 in both gcc and MSVC.
+ //
+ // As base/basictypes.h doesn't compile on Windows, we cannot use
+ // uint32, uint64, and etc here.
+ typedef int Int;
+ typedef unsigned int UInt;
+};
+
+// The specialization for size 8.
+template <>
+class TypeWithSize<8> {
+ public:
+#if GTEST_OS_WINDOWS
+ typedef __int64 Int;
+ typedef unsigned __int64 UInt;
+#else
+ typedef long long Int; // NOLINT
+ typedef unsigned long long UInt; // NOLINT
+#endif // GTEST_OS_WINDOWS
+};
+
+// Integer types of known sizes.
+typedef TypeWithSize<4>::Int Int32;
+typedef TypeWithSize<4>::UInt UInt32;
+typedef TypeWithSize<8>::Int Int64;
+typedef TypeWithSize<8>::UInt UInt64;
+typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds.
+
+// Utilities for command line flags and environment variables.
+
+// Macro for referencing flags.
+#if !defined(GTEST_FLAG)
+# define GTEST_FLAG(name) FLAGS_gtest_##name
+#endif // !defined(GTEST_FLAG)
+
+#if !defined(GTEST_USE_OWN_FLAGFILE_FLAG_)
+# define GTEST_USE_OWN_FLAGFILE_FLAG_ 1
+#endif // !defined(GTEST_USE_OWN_FLAGFILE_FLAG_)
+
+#if !defined(GTEST_DECLARE_bool_)
+# define GTEST_FLAG_SAVER_ ::testing::internal::GTestFlagSaver
+
+// Macros for declaring flags.
+# define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
+# define GTEST_DECLARE_int32_(name) \
+ GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
+# define GTEST_DECLARE_string_(name) \
+    GTEST_API_ extern ::std::string GTEST_FLAG(name)
+
+// Macros for defining flags.
+# define GTEST_DEFINE_bool_(name, default_val, doc) \
+    GTEST_API_ bool GTEST_FLAG(name) = (default_val)
+# define GTEST_DEFINE_int32_(name, default_val, doc) \
+    GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
+# define GTEST_DEFINE_string_(name, default_val, doc) \
+    GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val)
+
+#endif // !defined(GTEST_DECLARE_bool_)
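+
+// For example, with a flag named shuffle (the doc string is illustrative):
+//
+//   GTEST_DECLARE_bool_(shuffle);                           // In a header.
+//   GTEST_DEFINE_bool_(shuffle, false, "Shuffle tests?");   // In a .cc file.
+//   if (GTEST_FLAG(shuffle)) { ... }   // Reads FLAGS_gtest_shuffle.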
+
+// Thread annotations
+#if !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_)
+# define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
+# define GTEST_LOCK_EXCLUDED_(locks)
+#endif // !defined(GTEST_EXCLUSIVE_LOCK_REQUIRED_)
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes the result
+// to *value and returns true; otherwise leaves *value unchanged and returns
+// false.
+// TODO(chandlerc): Find a better way to refactor flag and environment parsing
+// out of both gtest-port.cc and gtest.cc to avoid exporting this utility
+// function.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value);
+
+// Parses a bool/Int32/string from the environment variable
+// corresponding to the given Google Test flag.
+bool BoolFromGTestEnv(const char* flag, bool default_val);
+GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
+std::string StringFromGTestEnv(const char* flag, const char* default_val);
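+
+// For example, BoolFromGTestEnv("shuffle", false) consults the environment
+// variable corresponding to the shuffle flag and falls back to false when
+// that variable is unset.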
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-string.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-string.h
new file mode 100644
index 000000000..97f1a7fdd
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-string.h
@@ -0,0 +1,167 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares the String class and functions used internally by
+// Google Test. They are subject to change without notice. They should not be
+// used by code external to Google Test.
+//
+// This header file is #included by <gtest/internal/gtest-internal.h>.
+// It should not be #included by other files.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+# include <mem.h>
+#endif
+
+#include <string.h>
+#include <string>
+
+#include "gtest/internal/gtest-port.h"
+
+namespace testing {
+namespace internal {
+
+// String - an abstract class holding static string utilities.
+class GTEST_API_ String {
+ public:
+ // Static utility methods
+
+ // Clones a 0-terminated C string, allocating memory using new. The
+ // caller is responsible for deleting the return value using
+ // delete[]. Returns the cloned string, or NULL if the input is
+ // NULL.
+ //
+ // This is different from strdup() in string.h, which allocates
+ // memory using malloc().
+ static const char* CloneCString(const char* c_str);
+
+#if GTEST_OS_WINDOWS_MOBILE
+ // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
+ // able to pass strings to Win32 APIs on CE we need to convert them
+ // to 'Unicode', UTF-16.
+
+ // Creates a UTF-16 wide string from the given ANSI string, allocating
+ // memory using new. The caller is responsible for deleting the return
+ // value using delete[]. Returns the wide string, or NULL if the
+ // input is NULL.
+ //
+ // The wide string is created using the ANSI codepage (CP_ACP) to
+ // match the behaviour of the ANSI versions of Win32 calls and the
+ // C runtime.
+ static LPCWSTR AnsiToUtf16(const char* c_str);
+
+ // Creates an ANSI string from the given wide string, allocating
+ // memory using new. The caller is responsible for deleting the return
+ // value using delete[]. Returns the ANSI string, or NULL if the
+ // input is NULL.
+ //
+ // The returned string is created using the ANSI codepage (CP_ACP) to
+ // match the behaviour of the ANSI versions of Win32 calls and the
+ // C runtime.
+ static const char* Utf16ToAnsi(LPCWSTR utf16_str);
+#endif
+
+ // Compares two C strings. Returns true iff they have the same content.
+ //
+ // Unlike strcmp(), this function can handle NULL argument(s). A
+ // NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool CStringEquals(const char* lhs, const char* rhs);
+
+ // Converts a wide C string to a String using the UTF-8 encoding.
+ // NULL will be converted to "(null)". If an error occurred during
+ // the conversion, "(failed to convert from wide string)" is
+ // returned.
+ static std::string ShowWideCString(const wchar_t* wide_c_str);
+
+ // Compares two wide C strings. Returns true iff they have the same
+ // content.
+ //
+ // Unlike wcscmp(), this function can handle NULL argument(s). A
+ // NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
+
+ // Compares two C strings, ignoring case. Returns true iff they
+ // have the same content.
+ //
+ // Unlike strcasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL C string,
+ // including the empty string.
+ static bool CaseInsensitiveCStringEquals(const char* lhs,
+ const char* rhs);
+
+ // Compares two wide C strings, ignoring case. Returns true iff they
+ // have the same content.
+ //
+ // Unlike wcscasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL wide C string,
+ // including the empty string.
+ // NB: The implementations on different platforms slightly differ.
+ // On windows, this method uses _wcsicmp which compares according to LC_CTYPE
+ // environment variable. On GNU platform this method uses wcscasecmp
+ // which compares according to LC_CTYPE category of the current locale.
+ // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the
+ // current locale.
+ static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs);
+
+ // Returns true iff the given string ends with the given suffix, ignoring
+ // case. Any string is considered to end with an empty suffix.
+ static bool EndsWithCaseInsensitive(
+ const std::string& str, const std::string& suffix);
+
+ // Formats an int value as "%02d".
+ static std::string FormatIntWidth2(int value); // "%02d" for width == 2
+
+ // Formats an int value as "%X".
+ static std::string FormatHexInt(int value);
+
+ // Formats a byte as "%02X".
+ static std::string FormatByte(unsigned char value);
+
+ private:
+ String(); // Not meant to be instantiated.
+}; // class String
+
+// Gets the content of the stringstream's buffer as an std::string. Each '\0'
+// character in the buffer is replaced with "\\0".
+GTEST_API_ std::string StringStreamToString(::std::stringstream* stream);
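+
+// Usage sketches for a few of the utilities above:
+//
+//   String::CStringEquals(NULL, "");                     // false
+//   String::CaseInsensitiveCStringEquals("Foo", "FOO");  // true
+//   String::EndsWithCaseInsensitive("foo.CC", ".cc");    // true
+//   String::FormatHexInt(255);                           // "FF"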
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-tuple.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-tuple.h
new file mode 100644
index 000000000..e9b405340
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-tuple.h
@@ -0,0 +1,1020 @@
+// This file was GENERATED by command:
+// pump.py gtest-tuple.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility> // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined). This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+ template <GTEST_10_TYPENAMES_(U)> friend class tuple; \
+ private:
+#endif
+
+// Visual Studio 2010, 2012, and 2013 define symbols in std::tr1 that conflict
+// with our own definitions. Therefore using our own tuple does not work on
+// those compilers.
+#if defined(_MSC_VER) && _MSC_VER >= 1600 /* 1600 is Visual Studio 2010 */
+# error "gtest's tuple doesn't compile on Visual Studio 2010 or later. \
+GTEST_USE_OWN_TR1_TUPLE must be set to 0 on those compilers."
+#endif
+
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<>
+#define GTEST_1_TUPLE_(T) tuple<T##0, void, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_2_TUPLE_(T) tuple<T##0, T##1, void, void, void, void, void, \
+ void, void, void>
+#define GTEST_3_TUPLE_(T) tuple<T##0, T##1, T##2, void, void, void, void, \
+ void, void, void>
+#define GTEST_4_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, void, void, void, \
+ void, void, void>
+#define GTEST_5_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, void, void, \
+ void, void, void>
+#define GTEST_6_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, void, \
+ void, void, void>
+#define GTEST_7_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ void, void, void>
+#define GTEST_8_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, void, void>
+#define GTEST_9_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, void>
+#define GTEST_10_TUPLE_(T) tuple<T##0, T##1, T##2, T##3, T##4, T##5, T##6, \
+ T##7, T##8, T##9>
+
+// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
+#define GTEST_0_TYPENAMES_(T)
+#define GTEST_1_TYPENAMES_(T) typename T##0
+#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1
+#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2
+#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3
+#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4
+#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5
+#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6
+#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, typename T##7
+#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8
+#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \
+ typename T##3, typename T##4, typename T##5, typename T##6, \
+ typename T##7, typename T##8, typename T##9
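+
+// For instance, GTEST_2_TYPENAMES_(T) and GTEST_2_TUPLE_(T) expand so that
+//
+//   template <GTEST_2_TYPENAMES_(T)> class GTEST_2_TUPLE_(T);
+//
+// reads as
+//
+//   template <typename T0, typename T1>
+//   class tuple<T0, T1, void, void, void, void, void, void, void, void>;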
+
+// In theory, defining stuff in the ::std namespace is undefined
+// behavior. We can do this as we are playing the role of a standard
+// library vendor.
+namespace std {
+namespace tr1 {
+
+template <typename T0 = void, typename T1 = void, typename T2 = void,
+ typename T3 = void, typename T4 = void, typename T5 = void,
+ typename T6 = void, typename T7 = void, typename T8 = void,
+ typename T9 = void>
+class tuple;
+
+// Anything in namespace gtest_internal is Google Test's INTERNAL
+// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
+namespace gtest_internal {
+
+// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
+template <typename T>
+struct ByRef { typedef const T& type; }; // NOLINT
+template <typename T>
+struct ByRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for ByRef.
+#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
+
+// AddRef<T>::type is T if T is a reference; otherwise it's T&. This
+// is the same as tr1::add_reference<T>::type.
+template <typename T>
+struct AddRef { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for AddRef.
+#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
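+// For example, GTEST_ADD_REF_(int) is int&, and GTEST_ADD_REF_(int&) is
+// still int&.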
+
+// A helper for implementing get<k>().
+template <int k> class Get;
+
+// A helper for implementing tuple_element<k, T>. kIndexValid is true
+// iff k < the number of fields in tuple type T.
+template <bool kIndexValid, int kIndex, class Tuple>
+struct TupleElement;
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 0, GTEST_10_TUPLE_(T) > {
+ typedef T0 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 1, GTEST_10_TUPLE_(T) > {
+ typedef T1 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 2, GTEST_10_TUPLE_(T) > {
+ typedef T2 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 3, GTEST_10_TUPLE_(T) > {
+ typedef T3 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 4, GTEST_10_TUPLE_(T) > {
+ typedef T4 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 5, GTEST_10_TUPLE_(T) > {
+ typedef T5 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 6, GTEST_10_TUPLE_(T) > {
+ typedef T6 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 7, GTEST_10_TUPLE_(T) > {
+ typedef T7 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 8, GTEST_10_TUPLE_(T) > {
+ typedef T8 type;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct TupleElement<true, 9, GTEST_10_TUPLE_(T) > {
+ typedef T9 type;
+};
+
+} // namespace gtest_internal
+
+template <>
+class tuple<> {
+ public:
+ tuple() {}
+ tuple(const tuple& /* t */) {}
+ tuple& operator=(const tuple& /* t */) { return *this; }
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+class GTEST_1_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {}
+
+ tuple(const tuple& t) : f0_(t.f0_) {}
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_1_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_1_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ return *this;
+ }
+
+ T0 f0_;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+class GTEST_2_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0),
+ f1_(f1) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {}
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {}
+ template <typename U0, typename U1>
+ tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_2_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+ template <typename U0, typename U1>
+ tuple& operator=(const ::std::pair<U0, U1>& p) {
+ f0_ = p.first;
+ f1_ = p.second;
+ return *this;
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_2_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+class GTEST_3_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_3_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_3_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+class GTEST_4_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {}
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_4_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_4_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+class GTEST_5_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3,
+ GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_) {}
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_5_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_5_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+class GTEST_6_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_) {}
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_6_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_6_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+class GTEST_7_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_7_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_7_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+class GTEST_8_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6,
+ GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_8_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_8_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+class GTEST_9_TUPLE_(T) {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4),
+ f5_(f5), f6_(f6), f7_(f7), f8_(f8) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_9_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_9_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+class tuple {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(),
+ f9_() {}
+
+ explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1,
+ GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4,
+ GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7,
+ GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2),
+ f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {}
+
+ tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_),
+ f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {}
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_),
+ f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_),
+ f9_(t.f9_) {}
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_10_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_10_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) {
+ f0_ = t.f0_;
+ f1_ = t.f1_;
+ f2_ = t.f2_;
+ f3_ = t.f3_;
+ f4_ = t.f4_;
+ f5_ = t.f5_;
+ f6_ = t.f6_;
+ f7_ = t.f7_;
+ f8_ = t.f8_;
+ f9_ = t.f9_;
+ return *this;
+ }
+
+ T0 f0_;
+ T1 f1_;
+ T2 f2_;
+ T3 f3_;
+ T4 f4_;
+ T5 f5_;
+ T6 f6_;
+ T7 f7_;
+ T8 f8_;
+ T9 f9_;
+};
+
+// 6.1.3.2 Tuple creation functions.
+
+// Known limitations: we don't support passing a
+// std::tr1::reference_wrapper<T> to make_tuple(), and we don't
+// implement tie().
+
+inline tuple<> make_tuple() { return tuple<>(); }
+
+template <GTEST_1_TYPENAMES_(T)>
+inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) {
+ return GTEST_1_TUPLE_(T)(f0);
+}
+
+template <GTEST_2_TYPENAMES_(T)>
+inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) {
+ return GTEST_2_TUPLE_(T)(f0, f1);
+}
+
+template <GTEST_3_TYPENAMES_(T)>
+inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) {
+ return GTEST_3_TUPLE_(T)(f0, f1, f2);
+}
+
+template <GTEST_4_TYPENAMES_(T)>
+inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3) {
+ return GTEST_4_TUPLE_(T)(f0, f1, f2, f3);
+}
+
+template <GTEST_5_TYPENAMES_(T)>
+inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4) {
+ return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4);
+}
+
+template <GTEST_6_TYPENAMES_(T)>
+inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5) {
+ return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5);
+}
+
+template <GTEST_7_TYPENAMES_(T)>
+inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6) {
+ return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6);
+}
+
+template <GTEST_8_TYPENAMES_(T)>
+inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) {
+ return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7);
+}
+
+template <GTEST_9_TYPENAMES_(T)>
+inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8) {
+ return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8);
+}
+
+template <GTEST_10_TYPENAMES_(T)>
+inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2,
+ const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7,
+ const T8& f8, const T9& f9) {
+ return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9);
+}
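+// For example (a usage sketch):
+//
+//   tuple<int, double> t = make_tuple(1, 2.5);  // element types deduced
+//   tuple<> empty = make_tuple();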
+
+// 6.1.3.3 Tuple helper classes.
+
+template <typename Tuple> struct tuple_size;
+
+template <GTEST_0_TYPENAMES_(T)>
+struct tuple_size<GTEST_0_TUPLE_(T) > {
+ static const int value = 0;
+};
+
+template <GTEST_1_TYPENAMES_(T)>
+struct tuple_size<GTEST_1_TUPLE_(T) > {
+ static const int value = 1;
+};
+
+template <GTEST_2_TYPENAMES_(T)>
+struct tuple_size<GTEST_2_TUPLE_(T) > {
+ static const int value = 2;
+};
+
+template <GTEST_3_TYPENAMES_(T)>
+struct tuple_size<GTEST_3_TUPLE_(T) > {
+ static const int value = 3;
+};
+
+template <GTEST_4_TYPENAMES_(T)>
+struct tuple_size<GTEST_4_TUPLE_(T) > {
+ static const int value = 4;
+};
+
+template <GTEST_5_TYPENAMES_(T)>
+struct tuple_size<GTEST_5_TUPLE_(T) > {
+ static const int value = 5;
+};
+
+template <GTEST_6_TYPENAMES_(T)>
+struct tuple_size<GTEST_6_TUPLE_(T) > {
+ static const int value = 6;
+};
+
+template <GTEST_7_TYPENAMES_(T)>
+struct tuple_size<GTEST_7_TUPLE_(T) > {
+ static const int value = 7;
+};
+
+template <GTEST_8_TYPENAMES_(T)>
+struct tuple_size<GTEST_8_TUPLE_(T) > {
+ static const int value = 8;
+};
+
+template <GTEST_9_TYPENAMES_(T)>
+struct tuple_size<GTEST_9_TUPLE_(T) > {
+ static const int value = 9;
+};
+
+template <GTEST_10_TYPENAMES_(T)>
+struct tuple_size<GTEST_10_TUPLE_(T) > {
+ static const int value = 10;
+};
+
+template <int k, class Tuple>
+struct tuple_element {
+ typedef typename gtest_internal::TupleElement<
+ k < (tuple_size<Tuple>::value), k, Tuple>::type type;
+};
+
+#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
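+// For example, tuple_element<1, tuple<int, double> >::type is double, and
+// tuple_size<tuple<int, double> >::value is 2.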
+
+// 6.1.3.4 Element access.
+
+namespace gtest_internal {
+
+template <>
+class Get<0> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ Field(Tuple& t) { return t.f0_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple))
+ ConstField(const Tuple& t) { return t.f0_; }
+};
+
+template <>
+class Get<1> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ Field(Tuple& t) { return t.f1_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple))
+ ConstField(const Tuple& t) { return t.f1_; }
+};
+
+template <>
+class Get<2> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ Field(Tuple& t) { return t.f2_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple))
+ ConstField(const Tuple& t) { return t.f2_; }
+};
+
+template <>
+class Get<3> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ Field(Tuple& t) { return t.f3_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple))
+ ConstField(const Tuple& t) { return t.f3_; }
+};
+
+template <>
+class Get<4> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ Field(Tuple& t) { return t.f4_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple))
+ ConstField(const Tuple& t) { return t.f4_; }
+};
+
+template <>
+class Get<5> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ Field(Tuple& t) { return t.f5_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple))
+ ConstField(const Tuple& t) { return t.f5_; }
+};
+
+template <>
+class Get<6> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ Field(Tuple& t) { return t.f6_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple))
+ ConstField(const Tuple& t) { return t.f6_; }
+};
+
+template <>
+class Get<7> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ Field(Tuple& t) { return t.f7_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple))
+ ConstField(const Tuple& t) { return t.f7_; }
+};
+
+template <>
+class Get<8> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ Field(Tuple& t) { return t.f8_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple))
+ ConstField(const Tuple& t) { return t.f8_; }
+};
+
+template <>
+class Get<9> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ Field(Tuple& t) { return t.f9_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple))
+ ConstField(const Tuple& t) { return t.f9_; }
+};
+
+} // namespace gtest_internal
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::Field(t);
+}
+
+template <int k, GTEST_10_TYPENAMES_(T)>
+GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T)))
+get(const GTEST_10_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::ConstField(t);
+}
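+// For example, given tuple<int, double> t(1, 2.5):
+//
+//   get<0>(t) == 1 and get<1>(t) == 2.5;
+//   get<0>(t) = 42;  // get<k> on a non-const tuple yields an lvalue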
+
+// 6.1.3.5 Relational operators
+
+// We only implement == and !=, as we don't have a need for the rest yet.
+
+namespace gtest_internal {
+
+// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
+// first k fields of t1 equal the first k fields of t2.
+// SameSizeTuplePrefixComparator<k1, k2> would be a compiler error if
+// k1 != k2.
+template <int kSize1, int kSize2>
+struct SameSizeTuplePrefixComparator;
+
+template <>
+struct SameSizeTuplePrefixComparator<0, 0> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
+ return true;
+ }
+};
+
+template <int k>
+struct SameSizeTuplePrefixComparator<k, k> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& t1, const Tuple2& t2) {
+ return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
+ ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
+ }
+};
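+// For example, Eq on two 2-field tuples unrolls at compile time to
+//   get<0>(t1) == get<0>(t2) && get<1>(t1) == get<1>(t2).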
+
+} // namespace gtest_internal
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator==(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) {
+ return gtest_internal::SameSizeTuplePrefixComparator<
+ tuple_size<GTEST_10_TUPLE_(T) >::value,
+ tuple_size<GTEST_10_TUPLE_(U) >::value>::Eq(t, u);
+}
+
+template <GTEST_10_TYPENAMES_(T), GTEST_10_TYPENAMES_(U)>
+inline bool operator!=(const GTEST_10_TUPLE_(T)& t,
+ const GTEST_10_TUPLE_(U)& u) { return !(t == u); }
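+// For example, make_tuple(1, 'a') == make_tuple(1, 'a') is true; comparing
+// tuples of different lengths does not compile, as the prefix comparator
+// above is only defined for equal sizes.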
+
+// 6.1.4 Pairs.
+// Unimplemented.
+
+} // namespace tr1
+} // namespace std
+
+#undef GTEST_0_TUPLE_
+#undef GTEST_1_TUPLE_
+#undef GTEST_2_TUPLE_
+#undef GTEST_3_TUPLE_
+#undef GTEST_4_TUPLE_
+#undef GTEST_5_TUPLE_
+#undef GTEST_6_TUPLE_
+#undef GTEST_7_TUPLE_
+#undef GTEST_8_TUPLE_
+#undef GTEST_9_TUPLE_
+#undef GTEST_10_TUPLE_
+
+#undef GTEST_0_TYPENAMES_
+#undef GTEST_1_TYPENAMES_
+#undef GTEST_2_TYPENAMES_
+#undef GTEST_3_TYPENAMES_
+#undef GTEST_4_TYPENAMES_
+#undef GTEST_5_TYPENAMES_
+#undef GTEST_6_TYPENAMES_
+#undef GTEST_7_TYPENAMES_
+#undef GTEST_8_TYPENAMES_
+#undef GTEST_9_TYPENAMES_
+#undef GTEST_10_TYPENAMES_
+
+#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
+#undef GTEST_BY_REF_
+#undef GTEST_ADD_REF_
+#undef GTEST_TUPLE_ELEMENT_
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-tuple.h.pump b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-tuple.h.pump
new file mode 100644
index 000000000..429ddfeec
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-tuple.h.pump
@@ -0,0 +1,347 @@
+$$ -*- mode: c++; -*-
+$var n = 10 $$ Maximum number of tuple fields we want to support.
+$$ This meta comment fixes auto-indentation in Emacs. }}
+// Copyright 2009 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Implements a subset of TR1 tuple needed by Google Test and Google Mock.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
+
+#include <utility> // For ::std::pair.
+
+// The compiler used in Symbian has a bug that prevents us from declaring the
+// tuple template as a friend (it complains that tuple is redefined). This
+// hack bypasses the bug by declaring the members that should otherwise be
+// private as public.
+// Sun Studio versions < 12 also have the above bug.
+#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590)
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public:
+#else
+# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \
+ template <GTEST_$(n)_TYPENAMES_(U)> friend class tuple; \
+ private:
+#endif
+
+// Visual Studio 2010, 2012, and 2013 define symbols in std::tr1 that conflict
+// with our own definitions. Therefore using our own tuple does not work on
+// those compilers.
+#if defined(_MSC_VER) && _MSC_VER >= 1600 /* 1600 is Visual Studio 2010 */
+# error "gtest's tuple doesn't compile on Visual Studio 2010 or later. \
+GTEST_USE_OWN_TR1_TUPLE must be set to 0 on those compilers."
+#endif
+
+
+$range i 0..n-1
+$range j 0..n
+$range k 1..n
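+$$ With n = 10, the $for loops below generate the same definitions as the
+$$ expanded gtest-tuple.h: k iterates over tuple arities 1..n, j over
+$$ 0..n, and i over field indices 0..n-1.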
+// GTEST_n_TUPLE_(T) is the type of an n-tuple.
+#define GTEST_0_TUPLE_(T) tuple<>
+
+$for k [[
+$range m 0..k-1
+$range m2 k..n-1
+#define GTEST_$(k)_TUPLE_(T) tuple<$for m, [[T##$m]]$for m2 [[, void]]>
+
+]]
+
+// GTEST_n_TYPENAMES_(T) declares a list of n typenames.
+
+$for j [[
+$range m 0..j-1
+#define GTEST_$(j)_TYPENAMES_(T) $for m, [[typename T##$m]]
+
+
+]]
+
+// In theory, defining stuff in the ::std namespace is undefined
+// behavior. We can do this as we are playing the role of a standard
+// library vendor.
+namespace std {
+namespace tr1 {
+
+template <$for i, [[typename T$i = void]]>
+class tuple;
+
+// Anything in namespace gtest_internal is Google Test's INTERNAL
+// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code.
+namespace gtest_internal {
+
+// ByRef<T>::type is T if T is a reference; otherwise it's const T&.
+template <typename T>
+struct ByRef { typedef const T& type; }; // NOLINT
+template <typename T>
+struct ByRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for ByRef.
+#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef<T>::type
+
+// AddRef<T>::type is T if T is a reference; otherwise it's T&. This
+// is the same as tr1::add_reference<T>::type.
+template <typename T>
+struct AddRef { typedef T& type; }; // NOLINT
+template <typename T>
+struct AddRef<T&> { typedef T& type; }; // NOLINT
+
+// A handy wrapper for AddRef.
+#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef<T>::type
+
+// A helper for implementing get<k>().
+template <int k> class Get;
+
+// A helper for implementing tuple_element<k, T>. kIndexValid is true
+// iff k < the number of fields in tuple type T.
+template <bool kIndexValid, int kIndex, class Tuple>
+struct TupleElement;
+
+
+$for i [[
+template <GTEST_$(n)_TYPENAMES_(T)>
+struct TupleElement<true, $i, GTEST_$(n)_TUPLE_(T) > {
+ typedef T$i type;
+};
+
+
+]]
+} // namespace gtest_internal
+
+template <>
+class tuple<> {
+ public:
+ tuple() {}
+ tuple(const tuple& /* t */) {}
+ tuple& operator=(const tuple& /* t */) { return *this; }
+};
+
+
+$for k [[
+$range m 0..k-1
+template <GTEST_$(k)_TYPENAMES_(T)>
+class $if k < n [[GTEST_$(k)_TUPLE_(T)]] $else [[tuple]] {
+ public:
+ template <int k> friend class gtest_internal::Get;
+
+ tuple() : $for m, [[f$(m)_()]] {}
+
+ explicit tuple($for m, [[GTEST_BY_REF_(T$m) f$m]]) : [[]]
+$for m, [[f$(m)_(f$m)]] {}
+
+ tuple(const tuple& t) : $for m, [[f$(m)_(t.f$(m)_)]] {}
+
+ template <GTEST_$(k)_TYPENAMES_(U)>
+ tuple(const GTEST_$(k)_TUPLE_(U)& t) : $for m, [[f$(m)_(t.f$(m)_)]] {}
+
+$if k == 2 [[
+ template <typename U0, typename U1>
+ tuple(const ::std::pair<U0, U1>& p) : f0_(p.first), f1_(p.second) {}
+
+]]
+
+ tuple& operator=(const tuple& t) { return CopyFrom(t); }
+
+ template <GTEST_$(k)_TYPENAMES_(U)>
+ tuple& operator=(const GTEST_$(k)_TUPLE_(U)& t) {
+ return CopyFrom(t);
+ }
+
+$if k == 2 [[
+ template <typename U0, typename U1>
+ tuple& operator=(const ::std::pair<U0, U1>& p) {
+ f0_ = p.first;
+ f1_ = p.second;
+ return *this;
+ }
+
+]]
+
+ GTEST_DECLARE_TUPLE_AS_FRIEND_
+
+ template <GTEST_$(k)_TYPENAMES_(U)>
+ tuple& CopyFrom(const GTEST_$(k)_TUPLE_(U)& t) {
+
+$for m [[
+ f$(m)_ = t.f$(m)_;
+
+]]
+ return *this;
+ }
+
+
+$for m [[
+ T$m f$(m)_;
+
+]]
+};
+
+
+]]
+// 6.1.3.2 Tuple creation functions.
+
+// Known limitations: we don't support passing a
+// std::tr1::reference_wrapper<T> to make_tuple(), and we don't
+// implement tie().
+
+inline tuple<> make_tuple() { return tuple<>(); }
+
+$for k [[
+$range m 0..k-1
+
+template <GTEST_$(k)_TYPENAMES_(T)>
+inline GTEST_$(k)_TUPLE_(T) make_tuple($for m, [[const T$m& f$m]]) {
+ return GTEST_$(k)_TUPLE_(T)($for m, [[f$m]]);
+}
+
+]]
+
+// 6.1.3.3 Tuple helper classes.
+
+template <typename Tuple> struct tuple_size;
+
+
+$for j [[
+template <GTEST_$(j)_TYPENAMES_(T)>
+struct tuple_size<GTEST_$(j)_TUPLE_(T) > {
+ static const int value = $j;
+};
+
+
+]]
+template <int k, class Tuple>
+struct tuple_element {
+ typedef typename gtest_internal::TupleElement<
+ k < (tuple_size<Tuple>::value), k, Tuple>::type type;
+};
+
+#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element<k, Tuple >::type
+
+// 6.1.3.4 Element access.
+
+namespace gtest_internal {
+
+
+$for i [[
+template <>
+class Get<$i> {
+ public:
+ template <class Tuple>
+ static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_($i, Tuple))
+ Field(Tuple& t) { return t.f$(i)_; } // NOLINT
+
+ template <class Tuple>
+ static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_($i, Tuple))
+ ConstField(const Tuple& t) { return t.f$(i)_; }
+};
+
+
+]]
+} // namespace gtest_internal
+
+template <int k, GTEST_$(n)_TYPENAMES_(T)>
+GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_$(n)_TUPLE_(T)))
+get(GTEST_$(n)_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::Field(t);
+}
+
+template <int k, GTEST_$(n)_TYPENAMES_(T)>
+GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_$(n)_TUPLE_(T)))
+get(const GTEST_$(n)_TUPLE_(T)& t) {
+ return gtest_internal::Get<k>::ConstField(t);
+}
+
+// 6.1.3.5 Relational operators
+
+// We only implement == and !=, as we don't have a need for the rest yet.
+
+namespace gtest_internal {
+
+// SameSizeTuplePrefixComparator<k, k>::Eq(t1, t2) returns true if the
+// first k fields of t1 equal the first k fields of t2.
+// SameSizeTuplePrefixComparator<k1, k2> would be a compiler error if
+// k1 != k2.
+template <int kSize1, int kSize2>
+struct SameSizeTuplePrefixComparator;
+
+template <>
+struct SameSizeTuplePrefixComparator<0, 0> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) {
+ return true;
+ }
+};
+
+template <int k>
+struct SameSizeTuplePrefixComparator<k, k> {
+ template <class Tuple1, class Tuple2>
+ static bool Eq(const Tuple1& t1, const Tuple2& t2) {
+ return SameSizeTuplePrefixComparator<k - 1, k - 1>::Eq(t1, t2) &&
+ ::std::tr1::get<k - 1>(t1) == ::std::tr1::get<k - 1>(t2);
+ }
+};
+
+} // namespace gtest_internal
+
+template <GTEST_$(n)_TYPENAMES_(T), GTEST_$(n)_TYPENAMES_(U)>
+inline bool operator==(const GTEST_$(n)_TUPLE_(T)& t,
+ const GTEST_$(n)_TUPLE_(U)& u) {
+ return gtest_internal::SameSizeTuplePrefixComparator<
+ tuple_size<GTEST_$(n)_TUPLE_(T) >::value,
+ tuple_size<GTEST_$(n)_TUPLE_(U) >::value>::Eq(t, u);
+}
+
+template <GTEST_$(n)_TYPENAMES_(T), GTEST_$(n)_TYPENAMES_(U)>
+inline bool operator!=(const GTEST_$(n)_TUPLE_(T)& t,
+ const GTEST_$(n)_TUPLE_(U)& u) { return !(t == u); }
+
+// 6.1.4 Pairs.
+// Unimplemented.
+
+} // namespace tr1
+} // namespace std
+
+
+$for j [[
+#undef GTEST_$(j)_TUPLE_
+
+]]
+
+
+$for j [[
+#undef GTEST_$(j)_TYPENAMES_
+
+]]
+
+#undef GTEST_DECLARE_TUPLE_AS_FRIEND_
+#undef GTEST_BY_REF_
+#undef GTEST_ADD_REF_
+#undef GTEST_TUPLE_ELEMENT_
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-type-util.h b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-type-util.h
new file mode 100644
index 000000000..e46f7cfcb
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-type-util.h
@@ -0,0 +1,3331 @@
+// This file was GENERATED by command:
+// pump.py gtest-type-util.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently we support at most 50 types in a list, and at most 50
+// type-parameterized tests in one type-parameterized test case.
+// Please contact googletestframework@googlegroups.com if you need
+// more.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+#include "gtest/internal/gtest-port.h"
+
+// #ifdef __GNUC__ is too general here. It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+# if GTEST_HAS_CXXABI_H_
+# include <cxxabi.h>
+# elif defined(__HP_aCC)
+# include <acxx_demangle.h>
+# endif  // GTEST_HAS_CXXABI_H_
+
+namespace testing {
+namespace internal {
+
+// GetTypeName<T>() returns a human-readable name of type T.
+// NB: This function is also used in Google Mock, so don't move it inside
+// the typed-test-only section below.
+template <typename T>
+std::string GetTypeName() {
+# if GTEST_HAS_RTTI
+
+ const char* const name = typeid(T).name();
+# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC)
+ int status = 0;
+ // gcc's implementation of typeid(T).name() mangles the type name,
+ // so we have to demangle it.
+# if GTEST_HAS_CXXABI_H_
+ using abi::__cxa_demangle;
+# endif // GTEST_HAS_CXXABI_H_
+ char* const readable_name = __cxa_demangle(name, 0, 0, &status);
+ const std::string name_str(status == 0 ? readable_name : name);
+ free(readable_name);
+ return name_str;
+# else
+ return name;
+# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC
+
+# else
+
+ return "<type>";
+
+# endif // GTEST_HAS_RTTI
+}
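+// For example, GetTypeName<int>() typically returns "int" when RTTI and a
+// demangler are available, and the placeholder "<type>" otherwise.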
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// AssertTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
+// type. This can be used as a compile-time assertion to ensure that
+// two types are equal.
+
+template <typename T1, typename T2>
+struct AssertTypeEq;
+
+template <typename T>
+struct AssertTypeEq<T, T> {
+ typedef bool type;
+};
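+// For example, the following compiles only because both arguments are the
+// same type:
+//
+//   typedef AssertTypeEq<int, int>::type IntIsInt;  // OK
+//
+// AssertTypeEq<int, char>::type would fail to compile, since the primary
+// template is declared but never defined.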
+
+// A unique type used as the default value for the arguments of class
+// template Types. This allows us to simulate variadic templates
+// (e.g. Types<int>, Types<int, double>, etc.), which C++ doesn't
+// support directly.
+struct None {};
+
+// The following family of struct and struct templates are used to
+// represent type lists. In particular, TypesN<T1, T2, ..., TN>
+// represents a type list with N types (T1, T2, ..., and TN) in it.
+// Except for Types0, every struct in the family has two member types:
+// Head for the first type in the list, and Tail for the rest of the
+// list.
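+//
+// For example, Types3<int, char, bool>::Head is int and its Tail is
+// Types2<char, bool>, so a type list can be walked recursively, one Head
+// at a time.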
+
+// The empty type list.
+struct Types0 {};
+
+// Type lists of length 1, 2, 3, and so on.
+
+template <typename T1>
+struct Types1 {
+ typedef T1 Head;
+ typedef Types0 Tail;
+};
+template <typename T1, typename T2>
+struct Types2 {
+ typedef T1 Head;
+ typedef Types1<T2> Tail;
+};
+
+template <typename T1, typename T2, typename T3>
+struct Types3 {
+ typedef T1 Head;
+ typedef Types2<T2, T3> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types4 {
+ typedef T1 Head;
+ typedef Types3<T2, T3, T4> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types5 {
+ typedef T1 Head;
+ typedef Types4<T2, T3, T4, T5> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+struct Types6 {
+ typedef T1 Head;
+ typedef Types5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+struct Types7 {
+ typedef T1 Head;
+ typedef Types6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+struct Types8 {
+ typedef T1 Head;
+ typedef Types7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+struct Types9 {
+ typedef T1 Head;
+ typedef Types8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types10 {
+ typedef T1 Head;
+ typedef Types9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+struct Types11 {
+ typedef T1 Head;
+ typedef Types10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+struct Types12 {
+ typedef T1 Head;
+ typedef Types11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+struct Types13 {
+ typedef T1 Head;
+ typedef Types12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+struct Types14 {
+ typedef T1 Head;
+ typedef Types13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types15 {
+ typedef T1 Head;
+ typedef Types14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+struct Types16 {
+ typedef T1 Head;
+ typedef Types15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+struct Types17 {
+ typedef T1 Head;
+ typedef Types16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+struct Types18 {
+ typedef T1 Head;
+ typedef Types17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+struct Types19 {
+ typedef T1 Head;
+ typedef Types18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types20 {
+ typedef T1 Head;
+ typedef Types19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+struct Types21 {
+ typedef T1 Head;
+ typedef Types20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+struct Types22 {
+ typedef T1 Head;
+ typedef Types21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+struct Types23 {
+ typedef T1 Head;
+ typedef Types22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+struct Types24 {
+ typedef T1 Head;
+ typedef Types23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types25 {
+ typedef T1 Head;
+ typedef Types24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+struct Types26 {
+ typedef T1 Head;
+ typedef Types25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+struct Types27 {
+ typedef T1 Head;
+ typedef Types26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+struct Types28 {
+ typedef T1 Head;
+ typedef Types27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+struct Types29 {
+ typedef T1 Head;
+ typedef Types28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types30 {
+ typedef T1 Head;
+ typedef Types29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+struct Types31 {
+ typedef T1 Head;
+ typedef Types30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+struct Types32 {
+ typedef T1 Head;
+ typedef Types31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+struct Types33 {
+ typedef T1 Head;
+ typedef Types32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+struct Types34 {
+ typedef T1 Head;
+ typedef Types33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types35 {
+ typedef T1 Head;
+ typedef Types34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+struct Types36 {
+ typedef T1 Head;
+ typedef Types35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+struct Types37 {
+ typedef T1 Head;
+ typedef Types36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+struct Types38 {
+ typedef T1 Head;
+ typedef Types37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+struct Types39 {
+ typedef T1 Head;
+ typedef Types38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types40 {
+ typedef T1 Head;
+ typedef Types39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+struct Types41 {
+ typedef T1 Head;
+ typedef Types40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+struct Types42 {
+ typedef T1 Head;
+ typedef Types41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+struct Types43 {
+ typedef T1 Head;
+ typedef Types42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+struct Types44 {
+ typedef T1 Head;
+ typedef Types43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types45 {
+ typedef T1 Head;
+ typedef Types44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+struct Types46 {
+ typedef T1 Head;
+ typedef Types45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+struct Types47 {
+ typedef T1 Head;
+ typedef Types46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+struct Types48 {
+ typedef T1 Head;
+ typedef Types47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+struct Types49 {
+ typedef T1 Head;
+ typedef Types48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+struct Types50 {
+ typedef T1 Head;
+ typedef Types49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> Tail;
+};
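+
+// Note (illustrative sketch, not part of the generated code): every
+// TypesN above is a cons-style list, so compile-time recursion can
+// peel off one type at a time via Head and Tail. For a hypothetical
+// three-type list:
+//
+//   Types3<int, char, long>::Head        is int
+//   Types3<int, char, long>::Tail        is Types2<char, long>
+//   Types3<int, char, long>::Tail::Tail  is Types1<long>
+//
+// and Types1<long>::Tail is Types0, which terminates the recursion.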
+
+
+} // namespace internal
+
+// We don't want to require the users to write TypesN<...> directly,
+// as that would require them to count the length. Types<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Types<int>
+// will appear as Types<int, None, None, ..., None> in the compiler
+// errors).
+//
+// Our solution combines the best parts of both approaches: the user
+// writes Types<T1, ..., TN>, and Google Test translates that to
+// TypesN<T1, ..., TN> internally so that error messages stay
+// readable. The translation is done by the 'type' member of the
+// Types template.
+template <typename T1 = internal::None, typename T2 = internal::None,
+ typename T3 = internal::None, typename T4 = internal::None,
+ typename T5 = internal::None, typename T6 = internal::None,
+ typename T7 = internal::None, typename T8 = internal::None,
+ typename T9 = internal::None, typename T10 = internal::None,
+ typename T11 = internal::None, typename T12 = internal::None,
+ typename T13 = internal::None, typename T14 = internal::None,
+ typename T15 = internal::None, typename T16 = internal::None,
+ typename T17 = internal::None, typename T18 = internal::None,
+ typename T19 = internal::None, typename T20 = internal::None,
+ typename T21 = internal::None, typename T22 = internal::None,
+ typename T23 = internal::None, typename T24 = internal::None,
+ typename T25 = internal::None, typename T26 = internal::None,
+ typename T27 = internal::None, typename T28 = internal::None,
+ typename T29 = internal::None, typename T30 = internal::None,
+ typename T31 = internal::None, typename T32 = internal::None,
+ typename T33 = internal::None, typename T34 = internal::None,
+ typename T35 = internal::None, typename T36 = internal::None,
+ typename T37 = internal::None, typename T38 = internal::None,
+ typename T39 = internal::None, typename T40 = internal::None,
+ typename T41 = internal::None, typename T42 = internal::None,
+ typename T43 = internal::None, typename T44 = internal::None,
+ typename T45 = internal::None, typename T46 = internal::None,
+ typename T47 = internal::None, typename T48 = internal::None,
+ typename T49 = internal::None, typename T50 = internal::None>
+struct Types {
+ typedef internal::Types50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
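+
+// As a quick illustration of the translation just described (an
+// explanatory sketch added for clarity; the typedef name 'MyTypes' is
+// hypothetical, not part of this header):
+//
+//   typedef ::testing::Types<int, double>::type MyTypes;
+//   // MyTypes is internal::Types2<int, double>; the trailing 48
+//   // internal::None defaults have been stripped by the matching
+//   // partial specialization below.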
+
+template <>
+struct Types<internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types0 type;
+};
+template <typename T1>
+struct Types<T1, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types1<T1> type;
+};
+template <typename T1, typename T2>
+struct Types<T1, T2, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types2<T1, T2> type;
+};
+template <typename T1, typename T2, typename T3>
+struct Types<T1, T2, T3, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types3<T1, T2, T3> type;
+};
+template <typename T1, typename T2, typename T3, typename T4>
+struct Types<T1, T2, T3, T4, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types4<T1, T2, T3, T4> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+struct Types<T1, T2, T3, T4, T5, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types5<T1, T2, T3, T4, T5> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+struct Types<T1, T2, T3, T4, T5, T6, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types6<T1, T2, T3, T4, T5, T6> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7>
+struct Types<T1, T2, T3, T4, T5, T6, T7, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11,
+ T12> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25,
+ T26> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39,
+ T40> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, internal::None,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None, internal::None> {
+ typedef internal::Types43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None, internal::None> {
+ typedef internal::Types44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ internal::None, internal::None, internal::None, internal::None,
+ internal::None> {
+ typedef internal::Types45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, internal::None, internal::None, internal::None, internal::None> {
+ typedef internal::Types46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, internal::None, internal::None, internal::None> {
+ typedef internal::Types47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, internal::None, internal::None> {
+ typedef internal::Types48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49>
+struct Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14, T15,
+ T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29, T30,
+ T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44, T45,
+ T46, T47, T48, T49, internal::None> {
+ typedef internal::Types49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+namespace internal {
+
+# define GTEST_TEMPLATE_ template <typename T> class
+
+// The template "selector" struct TemplateSel<Tmpl> is used to
+// represent Tmpl, which must be a class template with one type
+// parameter, as a type. TemplateSel<Tmpl>::Bind<T>::type is defined
+// as the type Tmpl<T>. This allows us to actually instantiate the
+// template "selected" by TemplateSel<Tmpl>.
+//
+// This trick is necessary for simulating a typedef for class templates
+// (a template alias), which pre-C++11 C++ doesn't support directly.
+template <GTEST_TEMPLATE_ Tmpl>
+struct TemplateSel {
+ template <typename T>
+ struct Bind {
+ typedef Tmpl<T> type;
+ };
+};
+
+# define GTEST_BIND_(TmplSel, T) \
+ TmplSel::template Bind<T>::type
+
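+// --- Illustration only; not part of gtest (hypothetical names). ----------
+// TemplateSel freezes a one-parameter class template into an ordinary
+// type, and its nested Bind (or the GTEST_BIND_ macro) re-applies it:
+template <typename T> class ExampleTmpl {};
+typedef TemplateSel<ExampleTmpl> ExampleSel;              // now a plain type
+typedef ExampleSel::Bind<int>::type ExampleOfInt;         // ExampleTmpl<int>
+typedef GTEST_BIND_(ExampleSel, double) ExampleOfDouble;  // ExampleTmpl<double>
+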
+// A unique struct template used as the default value for the
+// arguments of class template Templates. This allows us to simulate
+// variadic template lists (e.g. Templates<Tmpl1>, Templates<Tmpl1, Tmpl2>,
+// etc.), which pre-C++11 C++ doesn't support directly.
+template <typename T>
+struct NoneT {};
+
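+// --- Illustration only; not part of gtest (hypothetical names). ----------
+// The defaulted-argument trick in miniature: a sentinel type fills the
+// unused trailing slots, so shorter argument lists still name the same
+// fixed-arity template.
+template <typename A, typename B = NoneT<void> >
+struct ExamplePair {};
+typedef ExamplePair<int> ExampleShort;               // one argument written
+typedef ExamplePair<int, NoneT<void> > ExampleFull;  // the identical type
+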
+// The following family of structs and struct templates is used to
+// represent template lists. In particular, TemplatesN<T1, T2, ...,
+// TN> represents a list of N templates (T1, T2, ..., and TN). Except
+// for Templates0, every struct in the family has two member types:
+// Head, the selector for the first template in the list, and Tail,
+// the rest of the list. (A short illustration follows Templates2
+// below.)
+
+// The empty template list.
+struct Templates0 {};
+
+// Template lists of length 1, 2, 3, and so on.
+
+template <GTEST_TEMPLATE_ T1>
+struct Templates1 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates0 Tail;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates2 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates1<T2> Tail;
+};
+
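+// --- Illustration only; not part of gtest (hypothetical names). ----------
+// Generic code walks such a list by peeling off Head and recursing on
+// Tail until it reaches Templates0, e.g. a compile-time length count:
+template <typename TemplateList>
+struct ExampleListLength {
+  enum { value = 1 + ExampleListLength<typename TemplateList::Tail>::value };
+};
+template <>
+struct ExampleListLength<Templates0> {
+  enum { value = 0 };
+};
+// e.g. ExampleListLength<Templates2<ExampleTmpl, ExampleTmpl> >::value == 2.
+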
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates3 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates2<T2, T3> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4>
+struct Templates4 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates3<T2, T3, T4> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates5 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates4<T2, T3, T4, T5> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates6 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates5<T2, T3, T4, T5, T6> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7>
+struct Templates7 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates6<T2, T3, T4, T5, T6, T7> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates8 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates7<T2, T3, T4, T5, T6, T7, T8> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates9 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates8<T2, T3, T4, T5, T6, T7, T8, T9> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10>
+struct Templates10 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates9<T2, T3, T4, T5, T6, T7, T8, T9, T10> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates11 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates10<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates12 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates11<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13>
+struct Templates13 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates12<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates14 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates13<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates15 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates14<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16>
+struct Templates16 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates15<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates17 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates16<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates18 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates17<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19>
+struct Templates19 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates18<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates20 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates19<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates21 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates20<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22>
+struct Templates22 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates21<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates23 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates22<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates24 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates23<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25>
+struct Templates25 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates24<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates26 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates25<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates27 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates26<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28>
+struct Templates28 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates27<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates29 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates28<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates30 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates29<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31>
+struct Templates31 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates30<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates32 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates31<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates33 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates32<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34>
+struct Templates34 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates33<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates35 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates34<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates36 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates35<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37>
+struct Templates37 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates36<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates38 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates37<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates39 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates38<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40>
+struct Templates40 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates39<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates41 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates40<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates42 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates41<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43>
+struct Templates43 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates42<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates44 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates43<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates45 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates44<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46>
+struct Templates46 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates45<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates47 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates46<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates48 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates47<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49>
+struct Templates49 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates48<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48, T49> Tail;
+};
+
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49, GTEST_TEMPLATE_ T50>
+struct Templates50 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates49<T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42,
+ T43, T44, T45, T46, T47, T48, T49, T50> Tail;
+};
+
+
+// We don't want to require users to write TemplatesN<...> directly,
+// as that would require them to count the length. Templates<...> is much
+// easier to write, but on its own it would generate horrible messages on
+// a compiler error, as gcc insists on printing every template argument,
+// even one that has the default value (so Templates<list> would appear
+// as Templates<list, NoneT, NoneT, ..., NoneT> in the compiler errors).
+//
+// Our solution combines the best parts of both approaches: a user
+// writes Templates<T1, ..., TN>, and Google Test translates that to
+// TemplatesN<T1, ..., TN> internally to keep error messages readable.
+// The translation is done by the 'type' member of the Templates
+// template.
+template <GTEST_TEMPLATE_ T1 = NoneT, GTEST_TEMPLATE_ T2 = NoneT,
+ GTEST_TEMPLATE_ T3 = NoneT, GTEST_TEMPLATE_ T4 = NoneT,
+ GTEST_TEMPLATE_ T5 = NoneT, GTEST_TEMPLATE_ T6 = NoneT,
+ GTEST_TEMPLATE_ T7 = NoneT, GTEST_TEMPLATE_ T8 = NoneT,
+ GTEST_TEMPLATE_ T9 = NoneT, GTEST_TEMPLATE_ T10 = NoneT,
+ GTEST_TEMPLATE_ T11 = NoneT, GTEST_TEMPLATE_ T12 = NoneT,
+ GTEST_TEMPLATE_ T13 = NoneT, GTEST_TEMPLATE_ T14 = NoneT,
+ GTEST_TEMPLATE_ T15 = NoneT, GTEST_TEMPLATE_ T16 = NoneT,
+ GTEST_TEMPLATE_ T17 = NoneT, GTEST_TEMPLATE_ T18 = NoneT,
+ GTEST_TEMPLATE_ T19 = NoneT, GTEST_TEMPLATE_ T20 = NoneT,
+ GTEST_TEMPLATE_ T21 = NoneT, GTEST_TEMPLATE_ T22 = NoneT,
+ GTEST_TEMPLATE_ T23 = NoneT, GTEST_TEMPLATE_ T24 = NoneT,
+ GTEST_TEMPLATE_ T25 = NoneT, GTEST_TEMPLATE_ T26 = NoneT,
+ GTEST_TEMPLATE_ T27 = NoneT, GTEST_TEMPLATE_ T28 = NoneT,
+ GTEST_TEMPLATE_ T29 = NoneT, GTEST_TEMPLATE_ T30 = NoneT,
+ GTEST_TEMPLATE_ T31 = NoneT, GTEST_TEMPLATE_ T32 = NoneT,
+ GTEST_TEMPLATE_ T33 = NoneT, GTEST_TEMPLATE_ T34 = NoneT,
+ GTEST_TEMPLATE_ T35 = NoneT, GTEST_TEMPLATE_ T36 = NoneT,
+ GTEST_TEMPLATE_ T37 = NoneT, GTEST_TEMPLATE_ T38 = NoneT,
+ GTEST_TEMPLATE_ T39 = NoneT, GTEST_TEMPLATE_ T40 = NoneT,
+ GTEST_TEMPLATE_ T41 = NoneT, GTEST_TEMPLATE_ T42 = NoneT,
+ GTEST_TEMPLATE_ T43 = NoneT, GTEST_TEMPLATE_ T44 = NoneT,
+ GTEST_TEMPLATE_ T45 = NoneT, GTEST_TEMPLATE_ T46 = NoneT,
+ GTEST_TEMPLATE_ T47 = NoneT, GTEST_TEMPLATE_ T48 = NoneT,
+ GTEST_TEMPLATE_ T49 = NoneT, GTEST_TEMPLATE_ T50 = NoneT>
+struct Templates {
+ typedef Templates50<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48, T49, T50> type;
+};
+
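+// For example (hypothetical one-parameter class templates Tmpl1 and Tmpl2),
+// once the specializations below are in scope,
+//   Templates<Tmpl1, Tmpl2>::type
+// names the same list as the directly-counted
+//   Templates2<Tmpl1, Tmpl2>,
+// because the NoneT-tail specializations strip the defaulted arguments.
+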
+template <>
+struct Templates<NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates0 type;
+};
+template <GTEST_TEMPLATE_ T1>
+struct Templates<T1, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates1<T1> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2>
+struct Templates<T1, T2, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates2<T1, T2> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3>
+struct Templates<T1, T2, T3, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates3<T1, T2, T3> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4>
+struct Templates<T1, T2, T3, T4, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates4<T1, T2, T3, T4> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5>
+struct Templates<T1, T2, T3, T4, T5, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates5<T1, T2, T3, T4, T5> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6>
+struct Templates<T1, T2, T3, T4, T5, T6, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates6<T1, T2, T3, T4, T5, T6> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates7<T1, T2, T3, T4, T5, T6, T7> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates8<T1, T2, T3, T4, T5, T6, T7, T8> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates9<T1, T2, T3, T4, T5, T6, T7, T8, T9> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates10<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates11<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates12<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates13<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates14<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates15<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates16<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates17<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates18<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates19<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates20<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates21<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates22<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates23<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT> {
+ typedef Templates24<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates25<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates26<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates27<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT> {
+ typedef Templates28<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT> {
+ typedef Templates29<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates30<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates31<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates32<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates33<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates34<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates35<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates36<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, NoneT, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates37<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, NoneT, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates38<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates39<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, NoneT, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates40<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, NoneT, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates41<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, NoneT,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates42<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates43<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ NoneT, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates44<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, NoneT, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates45<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, NoneT, NoneT, NoneT, NoneT> {
+ typedef Templates46<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, NoneT, NoneT, NoneT> {
+ typedef Templates47<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, T48, NoneT, NoneT> {
+ typedef Templates48<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48> type;
+};
+template <GTEST_TEMPLATE_ T1, GTEST_TEMPLATE_ T2, GTEST_TEMPLATE_ T3,
+ GTEST_TEMPLATE_ T4, GTEST_TEMPLATE_ T5, GTEST_TEMPLATE_ T6,
+ GTEST_TEMPLATE_ T7, GTEST_TEMPLATE_ T8, GTEST_TEMPLATE_ T9,
+ GTEST_TEMPLATE_ T10, GTEST_TEMPLATE_ T11, GTEST_TEMPLATE_ T12,
+ GTEST_TEMPLATE_ T13, GTEST_TEMPLATE_ T14, GTEST_TEMPLATE_ T15,
+ GTEST_TEMPLATE_ T16, GTEST_TEMPLATE_ T17, GTEST_TEMPLATE_ T18,
+ GTEST_TEMPLATE_ T19, GTEST_TEMPLATE_ T20, GTEST_TEMPLATE_ T21,
+ GTEST_TEMPLATE_ T22, GTEST_TEMPLATE_ T23, GTEST_TEMPLATE_ T24,
+ GTEST_TEMPLATE_ T25, GTEST_TEMPLATE_ T26, GTEST_TEMPLATE_ T27,
+ GTEST_TEMPLATE_ T28, GTEST_TEMPLATE_ T29, GTEST_TEMPLATE_ T30,
+ GTEST_TEMPLATE_ T31, GTEST_TEMPLATE_ T32, GTEST_TEMPLATE_ T33,
+ GTEST_TEMPLATE_ T34, GTEST_TEMPLATE_ T35, GTEST_TEMPLATE_ T36,
+ GTEST_TEMPLATE_ T37, GTEST_TEMPLATE_ T38, GTEST_TEMPLATE_ T39,
+ GTEST_TEMPLATE_ T40, GTEST_TEMPLATE_ T41, GTEST_TEMPLATE_ T42,
+ GTEST_TEMPLATE_ T43, GTEST_TEMPLATE_ T44, GTEST_TEMPLATE_ T45,
+ GTEST_TEMPLATE_ T46, GTEST_TEMPLATE_ T47, GTEST_TEMPLATE_ T48,
+ GTEST_TEMPLATE_ T49>
+struct Templates<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13, T14,
+ T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28, T29,
+ T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43, T44,
+ T45, T46, T47, T48, T49, NoneT> {
+ typedef Templates49<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27,
+ T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41,
+ T42, T43, T44, T45, T46, T47, T48, T49> type;
+};
+
+// The TypeList template makes it possible to use either a single type
+// or a Types<...> list in TYPED_TEST_CASE() and
+// INSTANTIATE_TYPED_TEST_CASE_P().
+
+template <typename T>
+struct TypeList {
+ typedef Types1<T> type;
+};
+
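+// For instance, with a hypothetical fixture FooTest, either of the
+// following forms works; both resolve to a TypesN<...> list internally:
+//
+//   TYPED_TEST_CASE(FooTest, int);  // a single type
+//   TYPED_TEST_CASE(FooTest, testing::Types<char, int, unsigned int>);
+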
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6, typename T7, typename T8, typename T9, typename T10,
+ typename T11, typename T12, typename T13, typename T14, typename T15,
+ typename T16, typename T17, typename T18, typename T19, typename T20,
+ typename T21, typename T22, typename T23, typename T24, typename T25,
+ typename T26, typename T27, typename T28, typename T29, typename T30,
+ typename T31, typename T32, typename T33, typename T34, typename T35,
+ typename T36, typename T37, typename T38, typename T39, typename T40,
+ typename T41, typename T42, typename T43, typename T44, typename T45,
+ typename T46, typename T47, typename T48, typename T49, typename T50>
+struct TypeList<Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12, T13,
+ T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26, T27, T28,
+ T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40, T41, T42, T43,
+ T44, T45, T46, T47, T48, T49, T50> > {
+ typedef typename Types<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10, T11, T12,
+ T13, T14, T15, T16, T17, T18, T19, T20, T21, T22, T23, T24, T25, T26,
+ T27, T28, T29, T30, T31, T32, T33, T34, T35, T36, T37, T38, T39, T40,
+ T41, T42, T43, T44, T45, T46, T47, T48, T49, T50>::type type;
+};
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-type-util.h.pump b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-type-util.h.pump
new file mode 100644
index 000000000..251fdf025
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/include/gtest/internal/gtest-type-util.h.pump
@@ -0,0 +1,297 @@
+$$ -*- mode: c++; -*-
+$var n = 50 $$ Maximum length of type lists we want to support.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests. This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+// Currently we support at most $n types in a list, and at most $n
+// type-parameterized tests in one type-parameterized test case.
+// Please contact googletestframework@googlegroups.com if you need
+// more.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+#include "gtest/internal/gtest-port.h"
+
+// #ifdef __GNUC__ is too general here. It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+# if GTEST_HAS_CXXABI_H_
+# include <cxxabi.h>
+# elif defined(__HP_aCC)
+# include <acxx_demangle.h>
+# endif  // GTEST_HAS_CXXABI_H_
+
+namespace testing {
+namespace internal {
+
+// GetTypeName<T>() returns a human-readable name of type T.
+// NB: This function is also used in Google Mock, so don't move it inside of
+// the typed-test-only section below.
+template <typename T>
+std::string GetTypeName() {
+# if GTEST_HAS_RTTI
+
+ const char* const name = typeid(T).name();
+# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC)
+ int status = 0;
+ // gcc's implementation of typeid(T).name() mangles the type name,
+ // so we have to demangle it.
+# if GTEST_HAS_CXXABI_H_
+ using abi::__cxa_demangle;
+# endif // GTEST_HAS_CXXABI_H_
+ char* const readable_name = __cxa_demangle(name, 0, 0, &status);
+ const std::string name_str(status == 0 ? readable_name : name);
+ free(readable_name);
+ return name_str;
+# else
+ return name;
+# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC
+
+# else
+
+ return "<type>";
+
+# endif // GTEST_HAS_RTTI
+}
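+//
+// For example, with RTTI enabled GetTypeName<int>() typically yields
+// "int" (after demangling where available); without RTTI it returns
+// the placeholder "<type>".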
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// AssertTypeEq<T1, T2>::type is defined iff T1 and T2 are the same
+// type. This can be used as a compile-time assertion to ensure that
+// two types are equal.
+
+template <typename T1, typename T2>
+struct AssertTypeEq;
+
+template <typename T>
+struct AssertTypeEq<T, T> {
+ typedef bool type;
+};
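+
+// For example (Ok and Bad are arbitrary names):
+//
+//   typedef AssertTypeEq<int, int>::type Ok;    // compiles
+//   typedef AssertTypeEq<int, char>::type Bad;  // wouldn't compile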
+
+// A unique type used as the default value for the arguments of class
+// template Types. This allows us to simulate variadic templates
+// (e.g. Types<int>, Types<int, double>, etc.), which C++ doesn't
+// support directly.
+struct None {};
+
+// The following family of struct and struct templates is used to
+// represent type lists. In particular, TypesN<T1, T2, ..., TN>
+// represents a type list with N types (T1, T2, ..., and TN) in it.
+// Except for Types0, every struct in the family has two member types:
+// Head for the first type in the list, and Tail for the rest of the
+// list.
+
+// The empty type list.
+struct Types0 {};
+
+// Type lists of length 1, 2, 3, and so on.
+
+template <typename T1>
+struct Types1 {
+ typedef T1 Head;
+ typedef Types0 Tail;
+};
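+
+// For example, Types2<int, double> (generated below) has Head int and
+// Tail Types1<double>, so a type list can be walked recursively.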
+
+$range i 2..n
+
+$for i [[
+$range j 1..i
+$range k 2..i
+template <$for j, [[typename T$j]]>
+struct Types$i {
+ typedef T1 Head;
+ typedef Types$(i-1)<$for k, [[T$k]]> Tail;
+};
+
+
+]]
+
+} // namespace internal
+
+// We don't want to require the users to write TypesN<...> directly,
+// as that would require them to count the length. Types<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Types<int>
+// will appear as Types<int, None, None, ..., None> in the compiler
+// errors).
+//
+// Our solution is to combine the best part of the two approaches: a
+// user would write Types<T1, ..., TN>, and Google Test will translate
+// that to TypesN<T1, ..., TN> internally to make error messages
+// readable. The translation is done by the 'type' member of the
+// Types template.
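+//
+// For instance, Types<int, double>::type is internal::Types2<int,
+// double>, so diagnostics mention the short TypesN form instead of a
+// 50-parameter list.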
+
+$range i 1..n
+template <$for i, [[typename T$i = internal::None]]>
+struct Types {
+ typedef internal::Types$n<$for i, [[T$i]]> type;
+};
+
+template <>
+struct Types<$for i, [[internal::None]]> {
+ typedef internal::Types0 type;
+};
+
+$range i 1..n-1
+$for i [[
+$range j 1..i
+$range k i+1..n
+template <$for j, [[typename T$j]]>
+struct Types<$for j, [[T$j]]$for k[[, internal::None]]> {
+ typedef internal::Types$i<$for j, [[T$j]]> type;
+};
+
+]]
+
+namespace internal {
+
+# define GTEST_TEMPLATE_ template <typename T> class
+
+// The template "selector" struct TemplateSel<Tmpl> is used to
+// represent Tmpl, which must be a class template with one type
+// parameter, as a type. TemplateSel<Tmpl>::Bind<T>::type is defined
+// as the type Tmpl<T>. This allows us to actually instantiate the
+// template "selected" by TemplateSel<Tmpl>.
+//
+// This trick is necessary for simulating typedef for class templates,
+// which C++ doesn't support directly.
+template <GTEST_TEMPLATE_ Tmpl>
+struct TemplateSel {
+ template <typename T>
+ struct Bind {
+ typedef Tmpl<T> type;
+ };
+};
+
+# define GTEST_BIND_(TmplSel, T) \
+ TmplSel::template Bind<T>::type
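+
+// For example, given a hypothetical single-parameter class template
+// MyVector, TemplateSel<MyVector>::Bind<int>::type (equivalently
+// GTEST_BIND_(TemplateSel<MyVector>, int)) names the type MyVector<int>.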
+
+// A unique struct template used as the default value for the
+// arguments of class template Templates. This allows us to simulate
+// variadic templates (e.g. Templates<int>, Templates<int, double>,
+// etc.), which C++ doesn't support directly.
+template <typename T>
+struct NoneT {};
+
+// The following family of struct and struct templates is used to
+// represent template lists. In particular, TemplatesN<T1, T2, ...,
+// TN> represents a list of N templates (T1, T2, ..., and TN). Except
+// for Templates0, every struct in the family has two member types:
+// Head for the selector of the first template in the list, and Tail
+// for the rest of the list.
+
+// The empty template list.
+struct Templates0 {};
+
+// Template lists of length 1, 2, 3, and so on.
+
+template <GTEST_TEMPLATE_ T1>
+struct Templates1 {
+ typedef TemplateSel<T1> Head;
+ typedef Templates0 Tail;
+};
+
+$range i 2..n
+
+$for i [[
+$range j 1..i
+$range k 2..i
+template <$for j, [[GTEST_TEMPLATE_ T$j]]>
+struct Templates$i {
+ typedef TemplateSel<T1> Head;
+ typedef Templates$(i-1)<$for k, [[T$k]]> Tail;
+};
+
+
+]]
+
+// We don't want to require the users to write TemplatesN<...> directly,
+// as that would require them to count the length. Templates<...> is much
+// easier to write, but generates horrible messages when there is a
+// compiler error, as gcc insists on printing out each template
+// argument, even if it has the default value (this means Templates<list>
+// will appear as Templates<list, NoneT, NoneT, ..., NoneT> in the compiler
+// errors).
+//
+// Our solution is to combine the best part of the two approaches: a
+// user would write Templates<T1, ..., TN>, and Google Test will translate
+// that to TemplatesN<T1, ..., TN> internally to make error messages
+// readable. The translation is done by the 'type' member of the
+// Templates template.
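+//
+// For instance, Templates<MyTmplA, MyTmplB>::type is
+// Templates2<MyTmplA, MyTmplB>, where MyTmplA and MyTmplB stand for
+// any single-parameter class templates.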
+
+$range i 1..n
+template <$for i, [[GTEST_TEMPLATE_ T$i = NoneT]]>
+struct Templates {
+ typedef Templates$n<$for i, [[T$i]]> type;
+};
+
+template <>
+struct Templates<$for i, [[NoneT]]> {
+ typedef Templates0 type;
+};
+
+$range i 1..n-1
+$for i [[
+$range j 1..i
+$range k i+1..n
+template <$for j, [[GTEST_TEMPLATE_ T$j]]>
+struct Templates<$for j, [[T$j]]$for k[[, NoneT]]> {
+ typedef Templates$i<$for j, [[T$j]]> type;
+};
+
+]]
+
+// The TypeList template makes it possible to use either a single type
+// or a Types<...> list in TYPED_TEST_CASE() and
+// INSTANTIATE_TYPED_TEST_CASE_P().
+
+template <typename T>
+struct TypeList {
+ typedef Types1<T> type;
+};
+
+
+$range i 1..n
+template <$for i, [[typename T$i]]>
+struct TypeList<Types<$for i, [[T$i]]> > {
+ typedef typename Types<$for i, [[T$i]]>::type type;
+};
+
+#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest-all.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-all.cc
new file mode 100644
index 000000000..0a9cee522
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-all.cc
@@ -0,0 +1,48 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// Google C++ Testing Framework (Google Test)
+//
+// Sometimes it's desirable to build Google Test by compiling a single file.
+// This file serves this purpose.
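+//
+// A minimal sketch of such a build (paths are illustrative and assume
+// the standard googletest source layout):
+//
+//   g++ -Igoogletest/include -Igoogletest -c googletest/src/gtest-all.cc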
+
+// This line ensures that gtest.h can be compiled on its own, even
+// when it's fused.
+#include "gtest/gtest.h"
+
+// The following lines pull in the real gtest *.cc files.
+#include "src/gtest.cc"
+#include "src/gtest-death-test.cc"
+#include "src/gtest-filepath.cc"
+#include "src/gtest-port.cc"
+#include "src/gtest-printers.cc"
+#include "src/gtest-test-part.cc"
+#include "src/gtest-typed-test.cc"
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest-death-test.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-death-test.cc
new file mode 100644
index 000000000..a01a36983
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-death-test.cc
@@ -0,0 +1,1342 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
+//
+// This file implements death tests.
+
+#include "gtest/gtest-death-test.h"
+#include "gtest/internal/gtest-port.h"
+#include "gtest/internal/custom/gtest.h"
+
+#if GTEST_HAS_DEATH_TEST
+
+# if GTEST_OS_MAC
+# include <crt_externs.h>
+# endif // GTEST_OS_MAC
+
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+
+# if GTEST_OS_LINUX
+# include <signal.h>
+# endif // GTEST_OS_LINUX
+
+# include <stdarg.h>
+
+# if GTEST_OS_WINDOWS
+# include <windows.h>
+# else
+# include <sys/mman.h>
+# include <sys/wait.h>
+# endif // GTEST_OS_WINDOWS
+
+# if GTEST_OS_QNX
+# include <spawn.h>
+# endif // GTEST_OS_QNX
+
+#endif // GTEST_HAS_DEATH_TEST
+
+#include "gtest/gtest-message.h"
+#include "gtest/internal/gtest-string.h"
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick exists to
+// prevent the accidental inclusion of gtest-internal-inl.h in the
+// user's code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
+static const char kDefaultDeathTestStyle[] = "fast";
+
+GTEST_DEFINE_string_(
+ death_test_style,
+ internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
+ "Indicates how to run a death test in a forked child process: "
+ "\"threadsafe\" (child process re-executes the test binary "
+ "from the beginning, running only the specific death test) or "
+ "\"fast\" (child process runs the death test immediately "
+ "after forking).");
+
+GTEST_DEFINE_bool_(
+ death_test_use_fork,
+ internal::BoolFromGTestEnv("death_test_use_fork", false),
+ "Instructs to use fork()/_exit() instead of clone() in death tests. "
+ "Ignored and always uses fork() on POSIX systems where clone() is not "
+ "implemented. Useful when running under valgrind or similar tools if "
+ "those do not support clone(). Valgrind 3.3.1 will just fail if "
+ "it sees an unsupported combination of clone() flags. "
+ "It is not recommended to use this flag w/o valgrind though it will "
+ "work in 99% of the cases. Once valgrind is fixed, this flag will "
+ "most likely be removed.");
+
+namespace internal {
+GTEST_DEFINE_string_(
+ internal_run_death_test, "",
+ "Indicates the file, line number, temporal index of "
+ "the single death test to run, and a file descriptor to "
+ "which a success code may be sent, all separated by "
+ "the '|' characters. This flag is specified if and only if the current "
+ "process is a sub-process launched for running a thread-safe "
+ "death test. FOR INTERNAL USE ONLY.");
+} // namespace internal
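+
+// Editorial example (format inferred from the flag description above,
+// '|'-separated fields): on POSIX a child might be launched with
+//   --gtest_internal_run_death_test=foo_test.cc|42|1|7
+// i.e. file foo_test.cc, line 42, death test index 1, status write fd 7.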
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Valid only for fast death tests. Indicates the code is running in the
+// child process of a fast style death test.
+# if !GTEST_OS_WINDOWS
+static bool g_in_fast_death_test_child = false;
+# endif
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process. Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests. IMPORTANT: This is an internal utility. Using it may break the
+// implementation of death tests. User code MUST NOT use it.
+bool InDeathTestChild() {
+# if GTEST_OS_WINDOWS
+
+ // On Windows, death tests are thread-safe regardless of the value of the
+ // death_test_style flag.
+ return !GTEST_FLAG(internal_run_death_test).empty();
+
+# else
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe")
+ return !GTEST_FLAG(internal_run_death_test).empty();
+ else
+ return g_in_fast_death_test_child;
+# endif
+}
+
+} // namespace internal
+
+// ExitedWithCode constructor.
+ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
+}
+
+// ExitedWithCode function-call operator.
+bool ExitedWithCode::operator()(int exit_status) const {
+# if GTEST_OS_WINDOWS
+
+ return exit_status == exit_code_;
+
+# else
+
+ return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
+
+# endif // GTEST_OS_WINDOWS
+}
+
+# if !GTEST_OS_WINDOWS
+// KilledBySignal constructor.
+KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
+}
+
+// KilledBySignal function-call operator.
+bool KilledBySignal::operator()(int exit_status) const {
+# if defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_)
+ {
+ bool result;
+ if (GTEST_KILLED_BY_SIGNAL_OVERRIDE_(signum_, exit_status, &result)) {
+ return result;
+ }
+ }
+# endif // defined(GTEST_KILLED_BY_SIGNAL_OVERRIDE_)
+ return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
+}
+# endif // !GTEST_OS_WINDOWS
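+
+// Editorial usage sketch (these predicates are typically passed to the
+// EXPECT_EXIT macro declared in gtest-death-test.h):
+//   EXPECT_EXIT(_exit(1), ExitedWithCode(1), "");
+//   EXPECT_EXIT(raise(SIGKILL), KilledBySignal(SIGKILL), ""); // POSIX only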
+
+namespace internal {
+
+// Utilities needed for death tests.
+
+// Generates a textual description of a given exit code, in the format
+// specified by wait(2).
+static std::string ExitSummary(int exit_code) {
+ Message m;
+
+# if GTEST_OS_WINDOWS
+
+ m << "Exited with exit status " << exit_code;
+
+# else
+
+ if (WIFEXITED(exit_code)) {
+ m << "Exited with exit status " << WEXITSTATUS(exit_code);
+ } else if (WIFSIGNALED(exit_code)) {
+ m << "Terminated by signal " << WTERMSIG(exit_code);
+ }
+# ifdef WCOREDUMP
+ if (WCOREDUMP(exit_code)) {
+ m << " (core dumped)";
+ }
+# endif
+# endif // GTEST_OS_WINDOWS
+
+ return m.GetString();
+}
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+bool ExitedUnsuccessfully(int exit_status) {
+ return !ExitedWithCode(0)(exit_status);
+}
+
+# if !GTEST_OS_WINDOWS
+// Generates a textual failure message when a death test finds more than
+// one thread running, or cannot determine the number of threads, prior
+// to executing the given statement. It is the responsibility of the
+// caller not to pass a thread_count of 1.
+static std::string DeathTestThreadWarning(size_t thread_count) {
+ Message msg;
+ msg << "Death tests use fork(), which is unsafe particularly"
+ << " in a threaded context. For this test, " << GTEST_NAME_ << " ";
+ if (thread_count == 0)
+ msg << "couldn't detect the number of threads.";
+ else
+ msg << "detected " << thread_count << " threads.";
+ return msg.GetString();
+}
+# endif // !GTEST_OS_WINDOWS
+
+// Flag characters for reporting a death test that did not die.
+static const char kDeathTestLived = 'L';
+static const char kDeathTestReturned = 'R';
+static const char kDeathTestThrew = 'T';
+static const char kDeathTestInternalError = 'I';
+
+// An enumeration describing all of the possible ways that a death test can
+// conclude. DIED means that the process died while executing the test
+// code; LIVED means that the process lived beyond the end of the test code;
+// RETURNED means that the test statement attempted to execute a return
+// statement, which is not allowed; THREW means that the test statement
+// returned control by throwing an exception. IN_PROGRESS means the test
+// has not yet concluded.
+// TODO(vladl@google.com): Unify names and possibly values for
+// AbortReason, DeathTestOutcome, and flag characters above.
+enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW };
+
+// Routine for aborting the program which is safe to call from an
+// exec-style death test child process, in which case the error
+// message is propagated back to the parent process. Otherwise, the
+// message is simply printed to stderr. In either case, the program
+// then exits with status 1.
+void DeathTestAbort(const std::string& message) {
+ // On a POSIX system, this function may be called from a threadsafe-style
+ // death test child process, which operates on a very small stack. Use
+ // the heap for any additional non-minuscule memory requirements.
+ const InternalRunDeathTestFlag* const flag =
+ GetUnitTestImpl()->internal_run_death_test_flag();
+ if (flag != NULL) {
+ FILE* parent = posix::FDOpen(flag->write_fd(), "w");
+ fputc(kDeathTestInternalError, parent);
+ fprintf(parent, "%s", message.c_str());
+ fflush(parent);
+ _exit(1);
+ } else {
+ fprintf(stderr, "%s", message.c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+}
+
+// A replacement for CHECK that calls DeathTestAbort if the assertion
+// fails.
+# define GTEST_DEATH_TEST_CHECK_(expression) \
+ do { \
+ if (!::testing::internal::IsTrue(expression)) { \
+ DeathTestAbort( \
+ ::std::string("CHECK failed: File ") + __FILE__ + ", line " \
+ + ::testing::internal::StreamableToString(__LINE__) + ": " \
+ + #expression); \
+ } \
+ } while (::testing::internal::AlwaysFalse())
+
+// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
+// evaluating any system call that fulfills two conditions: it must return
+// -1 on failure, and set errno to EINTR when it is interrupted and
+// should be tried again. The macro expands to a loop that repeatedly
+// evaluates the expression as long as it evaluates to -1 and sets
+// errno to EINTR. If the expression evaluates to -1 but errno is
+// something other than EINTR, DeathTestAbort is called.
+# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
+ do { \
+ int gtest_retval; \
+ do { \
+ gtest_retval = (expression); \
+ } while (gtest_retval == -1 && errno == EINTR); \
+ if (gtest_retval == -1) { \
+ DeathTestAbort( \
+ ::std::string("CHECK failed: File ") + __FILE__ + ", line " \
+ + ::testing::internal::StreamableToString(__LINE__) + ": " \
+ + #expression + " != -1"); \
+ } \
+ } while (::testing::internal::AlwaysFalse())
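+
+// Editorial usage sketch: the macro above retries an interrupted call, e.g.
+//   GTEST_DEATH_TEST_CHECK_SYSCALL_(close(fd));
+// loops while close(fd) returns -1 with errno == EINTR, and aborts the death
+// test with a diagnostic if it fails with any other errno.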
+
+// Returns the message describing the last system error in errno.
+std::string GetLastErrnoDescription() {
+ return errno == 0 ? "" : posix::StrError(errno);
+}
+
+// This is called from a death test parent process to read a failure
+// message from the death test child process and log it with the FATAL
+// severity. On Windows, the message is read from a pipe handle. On other
+// platforms, it is read from a file descriptor.
+static void FailFromInternalError(int fd) {
+ Message error;
+ char buffer[256];
+ int num_read;
+
+ do {
+ while ((num_read = posix::Read(fd, buffer, 255)) > 0) {
+ buffer[num_read] = '\0';
+ error << buffer;
+ }
+ } while (num_read == -1 && errno == EINTR);
+
+ if (num_read == 0) {
+ GTEST_LOG_(FATAL) << error.GetString();
+ } else {
+ const int last_error = errno;
+ GTEST_LOG_(FATAL) << "Error while reading death test internal: "
+ << GetLastErrnoDescription() << " [" << last_error << "]";
+ }
+}
+
+// Death test constructor. Increments the running death test count
+// for the current test.
+DeathTest::DeathTest() {
+ TestInfo* const info = GetUnitTestImpl()->current_test_info();
+ if (info == NULL) {
+ DeathTestAbort("Cannot run a death test outside of a TEST or "
+ "TEST_F construct");
+ }
+}
+
+// Creates and returns a death test by dispatching to the current
+// death test factory.
+bool DeathTest::Create(const char* statement, const RE* regex,
+ const char* file, int line, DeathTest** test) {
+ return GetUnitTestImpl()->death_test_factory()->Create(
+ statement, regex, file, line, test);
+}
+
+const char* DeathTest::LastMessage() {
+ return last_death_test_message_.c_str();
+}
+
+void DeathTest::set_last_death_test_message(const std::string& message) {
+ last_death_test_message_ = message;
+}
+
+std::string DeathTest::last_death_test_message_;
+
+// Provides a cross-platform implementation of some death test functionality.
+class DeathTestImpl : public DeathTest {
+ protected:
+ DeathTestImpl(const char* a_statement, const RE* a_regex)
+ : statement_(a_statement),
+ regex_(a_regex),
+ spawned_(false),
+ status_(-1),
+ outcome_(IN_PROGRESS),
+ read_fd_(-1),
+ write_fd_(-1) {}
+
+ // read_fd_ is expected to be closed and cleared by a derived class.
+ ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }
+
+ void Abort(AbortReason reason);
+ virtual bool Passed(bool status_ok);
+
+ const char* statement() const { return statement_; }
+ const RE* regex() const { return regex_; }
+ bool spawned() const { return spawned_; }
+ void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
+ int status() const { return status_; }
+ void set_status(int a_status) { status_ = a_status; }
+ DeathTestOutcome outcome() const { return outcome_; }
+ void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }
+ int read_fd() const { return read_fd_; }
+ void set_read_fd(int fd) { read_fd_ = fd; }
+ int write_fd() const { return write_fd_; }
+ void set_write_fd(int fd) { write_fd_ = fd; }
+
+ // Called in the parent process only. Reads the result code of the death
+ // test child process via a pipe, interprets it to set the outcome_
+ // member, and closes read_fd_. Outputs diagnostics and terminates in
+ // case of unexpected codes.
+ void ReadAndInterpretStatusByte();
+
+ private:
+ // The textual content of the code this object is testing. This class
+ // doesn't own this string and should not attempt to delete it.
+ const char* const statement_;
+ // The regular expression which test output must match. DeathTestImpl
+ // doesn't own this object and should not attempt to delete it.
+ const RE* const regex_;
+ // True if the death test child process has been successfully spawned.
+ bool spawned_;
+ // The exit status of the child process.
+ int status_;
+ // How the death test concluded.
+ DeathTestOutcome outcome_;
+ // Descriptor to the read end of the pipe to the child process. It is
+ // always -1 in the child process. The child keeps its write end of the
+ // pipe in write_fd_.
+ int read_fd_;
+ // Descriptor to the child's write end of the pipe to the parent process.
+ // It is always -1 in the parent process. The parent keeps its end of the
+ // pipe in read_fd_.
+ int write_fd_;
+};
+
+// Called in the parent process only. Reads the result code of the death
+// test child process via a pipe, interprets it to set the outcome_
+// member, and closes read_fd_. Outputs diagnostics and terminates in
+// case of unexpected codes.
+void DeathTestImpl::ReadAndInterpretStatusByte() {
+ char flag;
+ int bytes_read;
+
+ // The read() here blocks until data is available (signifying the
+ // failure of the death test) or until the pipe is closed (signifying
+ // its success), so it's okay to call this in the parent before
+ // the child process has exited.
+ do {
+ bytes_read = posix::Read(read_fd(), &flag, 1);
+ } while (bytes_read == -1 && errno == EINTR);
+
+ if (bytes_read == 0) {
+ set_outcome(DIED);
+ } else if (bytes_read == 1) {
+ switch (flag) {
+ case kDeathTestReturned:
+ set_outcome(RETURNED);
+ break;
+ case kDeathTestThrew:
+ set_outcome(THREW);
+ break;
+ case kDeathTestLived:
+ set_outcome(LIVED);
+ break;
+ case kDeathTestInternalError:
+ FailFromInternalError(read_fd()); // Does not return.
+ break;
+ default:
+ GTEST_LOG_(FATAL) << "Death test child process reported "
+ << "unexpected status byte ("
+ << static_cast<unsigned int>(flag) << ")";
+ }
+ } else {
+ GTEST_LOG_(FATAL) << "Read from death test child process failed: "
+ << GetLastErrnoDescription();
+ }
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+ set_read_fd(-1);
+}
+
+// Signals that the death test code which should have exited, didn't.
+// Should be called only in a death test child process.
+// Writes a status byte to the child's status file descriptor, then
+// calls _exit(1).
+void DeathTestImpl::Abort(AbortReason reason) {
+ // The parent process considers the death test to be a failure if
+ // it finds any data in our pipe. So, here we write a single flag byte
+ // to the pipe, then exit.
+ const char status_ch =
+ reason == TEST_DID_NOT_DIE ? kDeathTestLived :
+ reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned;
+
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));
+ // We are leaking the descriptor here because on some platforms (e.g.,
+ // when built as a Windows DLL), destructors of global objects will still
+ // run after calling _exit(). On such systems, write_fd_ will be
+ // indirectly closed from the destructor of UnitTestImpl, causing double
+ // close if it is also closed here. On debug configurations, double close
+ // may assert. As there are no in-process buffers to flush here, we are
+ // relying on the OS to close the descriptor after the process terminates
+ // when the destructors are not run.
+ _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash)
+}
+
+// Returns an indented copy of stderr output for a death test.
+// This makes distinguishing death test output lines from regular log lines
+// much easier.
+static ::std::string FormatDeathTestOutput(const ::std::string& output) {
+ ::std::string ret;
+ for (size_t at = 0; ; ) {
+ const size_t line_end = output.find('\n', at);
+ ret += "[ DEATH ] ";
+ if (line_end == ::std::string::npos) {
+ ret += output.substr(at);
+ break;
+ }
+ ret += output.substr(at, line_end + 1 - at);
+ at = line_end + 1;
+ }
+ return ret;
+}
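+
+// Editorial worked example: FormatDeathTestOutput("foo\nbar") returns
+//   "[ DEATH ] foo\n[ DEATH ] bar"
+// i.e. every line of the captured stderr is prefixed, including the final
+// line even when it has no trailing newline.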
+
+// Assesses the success or failure of a death test, using both private
+// members which have previously been set, and one argument:
+//
+// Private data members:
+// outcome: An enumeration describing how the death test
+// concluded: DIED, LIVED, THREW, or RETURNED. The death test
+// fails in the latter three cases.
+// status: The exit status of the child process. On *nix, it is in
+// the format specified by wait(2). On Windows, this is the
+// value supplied to the ExitProcess() API or a numeric code
+// of the exception that terminated the program.
+// regex: A regular expression object to be applied to
+// the test's captured standard error output; the death test
+// fails if it does not match.
+//
+// Argument:
+// status_ok: true if exit_status is acceptable in the context of
+// this particular death test, which fails if it is false
+//
+// Returns true iff all of the above conditions are met. Otherwise, the
+// first failing condition, in the order given above, is the one that is
+// reported. Also sets the last death test message string.
+bool DeathTestImpl::Passed(bool status_ok) {
+ if (!spawned())
+ return false;
+
+ const std::string error_message = GetCapturedStderr();
+
+ bool success = false;
+ Message buffer;
+
+ buffer << "Death test: " << statement() << "\n";
+ switch (outcome()) {
+ case LIVED:
+ buffer << " Result: failed to die.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case THREW:
+ buffer << " Result: threw an exception.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case RETURNED:
+ buffer << " Result: illegal return in test statement.\n"
+ << " Error msg:\n" << FormatDeathTestOutput(error_message);
+ break;
+ case DIED:
+ if (status_ok) {
+ const bool matched = RE::PartialMatch(error_message.c_str(), *regex());
+ if (matched) {
+ success = true;
+ } else {
+ buffer << " Result: died but not with expected error.\n"
+ << " Expected: " << regex()->pattern() << "\n"
+ << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+ }
+ } else {
+ buffer << " Result: died but not with expected exit code:\n"
+ << " " << ExitSummary(status()) << "\n"
+ << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+ }
+ break;
+ case IN_PROGRESS:
+ default:
+ GTEST_LOG_(FATAL)
+ << "DeathTest::Passed somehow called before conclusion of test";
+ }
+
+ DeathTest::set_last_death_test_message(buffer.GetString());
+ return success;
+}
+
+# if GTEST_OS_WINDOWS
+// WindowsDeathTest implements death tests on Windows. Due to the
+// specifics of starting new processes on Windows, death tests there are
+// always threadsafe, and Google Test considers the
+// --gtest_death_test_style=fast setting to be equivalent to
+// --gtest_death_test_style=threadsafe there.
+//
+// A few implementation notes: Like the Linux version, the Windows
+// implementation uses pipes for child-to-parent communication. But due to
+// the specifics of pipes on Windows, some extra steps are required:
+//
+// 1. The parent creates a communication pipe and stores handles to both
+// ends of it.
+// 2. The parent starts the child and provides it with the information
+// necessary to acquire the handle to the write end of the pipe.
+// 3. The child acquires the write end of the pipe and signals the parent
+// using a Windows event.
+// 4. Now the parent can release the write end of the pipe on its side. If
+// this is done before step 3, the object's reference count goes down to
+// 0 and it is destroyed, preventing the child from acquiring it. The
+// parent now has to release it, or read operations on the read end of
+// the pipe will not return when the child terminates.
+// 5. The parent reads the child's output (outcome code and any possible
+// error messages) from the pipe, and its stderr, and then
+// determines whether to fail the test.
+//
+// Note: to distinguish Win32 API calls from the local method and function
+// calls, the former are explicitly resolved in the global namespace.
+//
+class WindowsDeathTest : public DeathTestImpl {
+ public:
+ WindowsDeathTest(const char* a_statement,
+ const RE* a_regex,
+ const char* file,
+ int line)
+ : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {}
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+ virtual TestRole AssumeRole();
+
+ private:
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+ // Handle to the write end of the pipe to the child process.
+ AutoHandle write_handle_;
+ // Child process handle.
+ AutoHandle child_handle_;
+ // Event the child process uses to signal the parent that it has
+ // acquired the handle to the write end of the pipe. After seeing this
+ // event the parent can release its own handles to make sure its
+ // ReadFile() calls return when the child terminates.
+ AutoHandle event_handle_;
+};
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int WindowsDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ // Wait until the child either signals that it has acquired the write end
+ // of the pipe or it dies.
+ const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
+ switch (::WaitForMultipleObjects(2,
+ wait_handles,
+ FALSE, // Waits for any of the handles.
+ INFINITE)) {
+ case WAIT_OBJECT_0:
+ case WAIT_OBJECT_0 + 1:
+ break;
+ default:
+ GTEST_DEATH_TEST_CHECK_(false); // Should not get here.
+ }
+
+ // The child has acquired the write end of the pipe or exited.
+ // We release the handle on our side and continue.
+ write_handle_.Reset();
+ event_handle_.Reset();
+
+ ReadAndInterpretStatusByte();
+
+ // Waits for the child process to exit if it hasn't already. This
+ // returns immediately if the child has already exited, regardless of
+ // whether previous calls to WaitForMultipleObjects synchronized on this
+ // handle or not.
+ GTEST_DEATH_TEST_CHECK_(
+ WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
+ INFINITE));
+ DWORD status_code;
+ GTEST_DEATH_TEST_CHECK_(
+ ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE);
+ child_handle_.Reset();
+ set_status(static_cast<int>(status_code));
+ return status();
+}
+
+// The AssumeRole process for a Windows death test. It creates a child
+// process with the same executable as the current process to run the
+// death test. The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole WindowsDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != NULL) {
+ // ParseInternalRunDeathTestFlag() has performed all the necessary
+ // processing.
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ // WindowsDeathTest uses an anonymous pipe to communicate results of
+ // a death test.
+ SECURITY_ATTRIBUTES handles_are_inheritable = {
+ sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
+ HANDLE read_handle, write_handle;
+ GTEST_DEATH_TEST_CHECK_(
+ ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
+ 0) // Default buffer size.
+ != FALSE);
+ set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+ O_RDONLY));
+ write_handle_.Reset(write_handle);
+ event_handle_.Reset(::CreateEvent(
+ &handles_are_inheritable,
+ TRUE, // Manual-reset event: it stays signaled until explicitly reset.
+ FALSE, // The initial state is non-signaled.
+ NULL)); // The event is unnamed.
+ GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
+ const std::string filter_flag =
+ std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" +
+ info->test_case_name() + "." + info->name();
+ const std::string internal_flag =
+ std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag +
+ "=" + file_ + "|" + StreamableToString(line_) + "|" +
+ StreamableToString(death_test_index) + "|" +
+ StreamableToString(static_cast<unsigned int>(::GetCurrentProcessId())) +
+ // size_t has the same width as pointers on both 32-bit and 64-bit
+ // Windows platforms.
+ // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+ "|" + StreamableToString(reinterpret_cast<size_t>(write_handle)) +
+ "|" + StreamableToString(reinterpret_cast<size_t>(event_handle_.Get()));
+
+ char executable_path[_MAX_PATH + 1]; // NOLINT
+ GTEST_DEATH_TEST_CHECK_(
+ _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
+ executable_path,
+ _MAX_PATH));
+
+ std::string command_line =
+ std::string(::GetCommandLineA()) + " " + filter_flag + " \"" +
+ internal_flag + "\"";
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // Flush the log buffers since the log streams are shared with the child.
+ FlushInfoLog();
+
+ // The child process will share the standard handles with the parent.
+ STARTUPINFOA startup_info;
+ memset(&startup_info, 0, sizeof(STARTUPINFO));
+ startup_info.dwFlags = STARTF_USESTDHANDLES;
+ startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+ startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+ startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+ PROCESS_INFORMATION process_info;
+ GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
+ executable_path,
+ const_cast<char*>(command_line.c_str()),
+ NULL, // Returned process handle is not inheritable.
+ NULL, // Returned thread handle is not inheritable.
+ TRUE, // Child inherits all inheritable handles (for write_handle_).
+ 0x0, // Default creation flags.
+ NULL, // Inherit the parent's environment.
+ UnitTest::GetInstance()->original_working_dir(),
+ &startup_info,
+ &process_info) != FALSE);
+ child_handle_.Reset(process_info.hProcess);
+ ::CloseHandle(process_info.hThread);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+# else // We are not on Windows.
+
+// ForkingDeathTest provides implementations for most of the abstract
+// methods of the DeathTest interface. Only the AssumeRole method is
+// left undefined.
+class ForkingDeathTest : public DeathTestImpl {
+ public:
+ ForkingDeathTest(const char* statement, const RE* regex);
+
+ // All of these virtual functions are inherited from DeathTest.
+ virtual int Wait();
+
+ protected:
+ void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+
+ private:
+ // PID of child process during death test; 0 in the child process itself.
+ pid_t child_pid_;
+};
+
+// Constructs a ForkingDeathTest.
+ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
+ : DeathTestImpl(a_statement, a_regex),
+ child_pid_(-1) {}
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists. As a side effect, sets the
+// outcome data member.
+int ForkingDeathTest::Wait() {
+ if (!spawned())
+ return 0;
+
+ ReadAndInterpretStatusByte();
+
+ int status_value;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
+ set_status(status_value);
+ return status_value;
+}
+
+// A concrete death test class that forks, then immediately runs the test
+// in the child process.
+class NoExecDeathTest : public ForkingDeathTest {
+ public:
+ NoExecDeathTest(const char* a_statement, const RE* a_regex) :
+ ForkingDeathTest(a_statement, a_regex) { }
+ virtual TestRole AssumeRole();
+};
+
+// The AssumeRole process for a fork-and-run death test. It implements a
+// straightforward fork, with a simple pipe to transmit the status byte.
+DeathTest::TestRole NoExecDeathTest::AssumeRole() {
+ const size_t thread_count = GetThreadCount();
+ if (thread_count != 1) {
+ GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
+ }
+
+ int pipe_fd[2];
+ GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+
+ DeathTest::set_last_death_test_message("");
+ CaptureStderr();
+ // When we fork the process below, the log file buffers are copied, but the
+ // file descriptors are shared. We flush all log files here so that closing
+ // the file descriptors in the child process doesn't throw off the
+ // synchronization between descriptors and buffers in the parent process.
+ // This is as close to the fork as possible to avoid a race condition in case
+ // there are multiple threads running before the death test, and another
+ // thread writes to the log file.
+ FlushInfoLog();
+
+ const pid_t child_pid = fork();
+ GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+ set_child_pid(child_pid);
+ if (child_pid == 0) {
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
+ set_write_fd(pipe_fd[1]);
+ // Redirects all logging to stderr in the child process to prevent
+ // concurrent writes to the log files. We capture stderr in the parent
+ // process and append the child process' output to a log.
+ LogToStderr();
+ // Event forwarding to the listeners of the event listener API must be shut
+ // down in death test subprocesses.
+ GetUnitTestImpl()->listeners()->SuppressEventForwarding();
+ g_in_fast_death_test_child = true;
+ return EXECUTE_TEST;
+ } else {
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+ set_read_fd(pipe_fd[0]);
+ set_spawned(true);
+ return OVERSEE_TEST;
+ }
+}
+
+// A concrete death test class that forks and re-executes the main
+// program from the beginning, with command-line flags set that cause
+// only this specific death test to be run.
+class ExecDeathTest : public ForkingDeathTest {
+ public:
+ ExecDeathTest(const char* a_statement, const RE* a_regex,
+ const char* file, int line) :
+ ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
+ virtual TestRole AssumeRole();
+ private:
+ static ::std::vector<testing::internal::string>
+ GetArgvsForDeathTestChildProcess() {
+ ::std::vector<testing::internal::string> args = GetInjectableArgvs();
+# if defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
+ ::std::vector<testing::internal::string> extra_args =
+ GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_();
+ args.insert(args.end(), extra_args.begin(), extra_args.end());
+# endif // defined(GTEST_EXTRA_DEATH_TEST_COMMAND_LINE_ARGS_)
+ return args;
+ }
+ // The name of the file in which the death test is located.
+ const char* const file_;
+ // The line number on which the death test is located.
+ const int line_;
+};
+
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+ Arguments() {
+ args_.push_back(NULL);
+ }
+
+ ~Arguments() {
+ for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+ ++i) {
+ free(*i);
+ }
+ }
+ void AddArgument(const char* argument) {
+ args_.insert(args_.end() - 1, posix::StrDup(argument));
+ }
+
+ template <typename Str>
+ void AddArguments(const ::std::vector<Str>& arguments) {
+ for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+ i != arguments.end();
+ ++i) {
+ args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+ }
+ }
+ char* const* Argv() {
+ return &args_[0];
+ }
+
+ private:
+ std::vector<char*> args_;
+};
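+
+// Editorial usage sketch: Arguments keeps args_ NULL-terminated, so Argv()
+// can be handed straight to an exec-family call:
+//   Arguments args;
+//   args.AddArgument("/path/to/test_binary");
+//   args.AddArgument("--gtest_filter=Foo.Bar");
+//   execve(args.Argv()[0], args.Argv(), GetEnviron());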
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs {
+ char* const* argv; // Command-line arguments for the child's call to exec
+ int close_fd; // File descriptor to close; the read end of a pipe
+};
+
+# if GTEST_OS_MAC
+inline char** GetEnviron() {
+ // When Google Test is built as a framework on MacOS X, the environ variable
+ // is unavailable. Apple's documentation (man environ) recommends using
+ // _NSGetEnviron() instead.
+ return *_NSGetEnviron();
+}
+# else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+# endif // GTEST_OS_MAC
+
+# if !GTEST_OS_QNX
+// The main function for a threadsafe-style death test child process.
+// This function is called in a clone()-ed process and thus must avoid
+// any potentially unsafe operations like malloc or libc functions.
+static int ExecDeathTestChildMain(void* child_arg) {
+ ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
+
+ // We need to execute the test program in the same environment where
+ // it was originally invoked. Therefore we change to the original
+ // working directory first.
+ const char* const original_dir =
+ UnitTest::GetInstance()->original_working_dir();
+ // We can safely call chdir() as it's a direct system call.
+ if (chdir(original_dir) != 0) {
+ DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+ GetLastErrnoDescription());
+ return EXIT_FAILURE;
+ }
+
+ // We can safely call execve() as it's a direct system call. We
+ // cannot use execvp() as it's a libc function and thus potentially
+ // unsafe. Since execve() doesn't search the PATH, the user must
+ // invoke the test program via a valid path that contains at least
+ // one path separator.
+ execve(args->argv[0], args->argv, GetEnviron());
+ DeathTestAbort(std::string("execve(") + args->argv[0] + ", ...) in " +
+ original_dir + " failed: " +
+ GetLastErrnoDescription());
+ return EXIT_FAILURE;
+}
+# endif // !GTEST_OS_QNX
+
+// Two utility routines that together determine the direction the stack
+// grows.
+// This could be accomplished more elegantly by a single recursive
+// function, but we want to guard against the unlikely possibility of
+// a smart compiler optimizing the recursion away.
+//
+// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining
+// StackLowerThanAddress into StackGrowsDown, which then doesn't give the
+// correct answer.
+void StackLowerThanAddress(const void* ptr, bool* result) GTEST_NO_INLINE_;
+void StackLowerThanAddress(const void* ptr, bool* result) {
+ int dummy;
+ *result = (&dummy < ptr);
+}
+
+// Make sure AddressSanitizer does not tamper with the stack here.
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+bool StackGrowsDown() {
+ int dummy;
+ bool result;
+ StackLowerThanAddress(&dummy, &result);
+ return result;
+}
+
+// Spawns a child process with the same executable as the current process in
+// a thread-safe manner and instructs it to run the death test. The
+// implementation uses fork(2) + exec. On systems where clone(2) is
+// available, it is used instead, being slightly more thread-safe. On QNX,
+// fork supports only single-threaded environments, so this function uses
+// spawn(2) there instead. The function dies with an error message if
+// anything goes wrong.
+static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) {
+ ExecDeathTestArgs args = { argv, close_fd };
+ pid_t child_pid = -1;
+
+# if GTEST_OS_QNX
+ // Obtains the current directory and sets it to be closed in the child
+ // process.
+ const int cwd_fd = open(".", O_RDONLY);
+ GTEST_DEATH_TEST_CHECK_(cwd_fd != -1);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC));
+ // We need to execute the test program in the same environment where
+ // it was originally invoked. Therefore we change to the original
+ // working directory first.
+ const char* const original_dir =
+ UnitTest::GetInstance()->original_working_dir();
+ // We can safely call chdir() as it's a direct system call.
+ if (chdir(original_dir) != 0) {
+ DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+ GetLastErrnoDescription());
+ return EXIT_FAILURE;
+ }
+
+ int fd_flags;
+ // Set close_fd to be closed after spawn.
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD));
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD,
+ fd_flags | FD_CLOEXEC));
+ struct inheritance inherit = {0};
+ // spawn is a system call.
+ child_pid = spawn(args.argv[0], 0, NULL, &inherit, args.argv, GetEnviron());
+ // Restores the current working directory.
+ GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd));
+
+# else // GTEST_OS_QNX
+# if GTEST_OS_LINUX
+ // When a SIGPROF signal is received while fork() or clone() are executing,
+ // the process may hang. To avoid this, we ignore SIGPROF here and re-enable
+ // it after the call to fork()/clone() is complete.
+ struct sigaction saved_sigprof_action;
+ struct sigaction ignore_sigprof_action;
+ memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action));
+ sigemptyset(&ignore_sigprof_action.sa_mask);
+ ignore_sigprof_action.sa_handler = SIG_IGN;
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction(
+ SIGPROF, &ignore_sigprof_action, &saved_sigprof_action));
+# endif // GTEST_OS_LINUX
+
+# if GTEST_HAS_CLONE
+ const bool use_fork = GTEST_FLAG(death_test_use_fork);
+
+ if (!use_fork) {
+ static const bool stack_grows_down = StackGrowsDown();
+ const size_t stack_size = getpagesize();
+ // MAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.
+ void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
+ MAP_ANON | MAP_PRIVATE, -1, 0);
+ GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
+
+ // Maximum stack alignment in bytes: For a downward-growing stack, this
+ // amount is subtracted from size of the stack space to get an address
+ // that is within the stack space and is aligned on all systems we care
+ // about. As far as I know there is no ABI with stack alignment greater
+ // than 64. We assume stack and stack_size already have alignment of
+ // kMaxStackAlignment.
+ const size_t kMaxStackAlignment = 64;
+ void* const stack_top =
+ static_cast<char*>(stack) +
+ (stack_grows_down ? stack_size - kMaxStackAlignment : 0);
+ GTEST_DEATH_TEST_CHECK_(stack_size > kMaxStackAlignment &&
+ reinterpret_cast<intptr_t>(stack_top) % kMaxStackAlignment == 0);
+
+ child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
+
+ GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
+ }
+# else
+ const bool use_fork = true;
+# endif // GTEST_HAS_CLONE
+
+ if (use_fork && (child_pid = fork()) == 0) {
+ ExecDeathTestChildMain(&args);
+ _exit(0);
+ }
+# endif // GTEST_OS_QNX
+# if GTEST_OS_LINUX
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(
+ sigaction(SIGPROF, &saved_sigprof_action, NULL));
+# endif // GTEST_OS_LINUX
+
+ GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+ return child_pid;
+}
+
+// The AssumeRole process for a fork-and-exec death test. It re-executes the
+// main program from the beginning, setting the --gtest_filter
+// and --gtest_internal_run_death_test flags to cause only the current
+// death test to be re-run.
+DeathTest::TestRole ExecDeathTest::AssumeRole() {
+ const UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const TestInfo* const info = impl->current_test_info();
+ const int death_test_index = info->result()->death_test_count();
+
+ if (flag != NULL) {
+ set_write_fd(flag->write_fd());
+ return EXECUTE_TEST;
+ }
+
+ int pipe_fd[2];
+ GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+ // Clear the close-on-exec flag on the write end of the pipe, lest
+ // it be closed when the child process does an exec:
+ GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
+
+ const std::string filter_flag =
+ std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "="
+ + info->test_case_name() + "." + info->name();
+ const std::string internal_flag =
+ std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "="
+ + file_ + "|" + StreamableToString(line_) + "|"
+ + StreamableToString(death_test_index) + "|"
+ + StreamableToString(pipe_fd[1]);
+ Arguments args;
+ args.AddArguments(GetArgvsForDeathTestChildProcess());
+ args.AddArgument(filter_flag.c_str());
+ args.AddArgument(internal_flag.c_str());
+
+ DeathTest::set_last_death_test_message("");
+
+ CaptureStderr();
+ // See the comment in NoExecDeathTest::AssumeRole for why the next line
+ // is necessary.
+ FlushInfoLog();
+
+ const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]);
+ GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+ set_child_pid(child_pid);
+ set_read_fd(pipe_fd[0]);
+ set_spawned(true);
+ return OVERSEE_TEST;
+}
+
+# endif // !GTEST_OS_WINDOWS
+
+// Creates a concrete DeathTest-derived class that depends on the
+// --gtest_death_test_style flag, and sets the pointer pointed to
+// by the "test" argument to its address. If the test should be
+// skipped, sets that pointer to NULL. Returns true, unless the
+// flag is set to an invalid value.
+bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
+ const char* file, int line,
+ DeathTest** test) {
+ UnitTestImpl* const impl = GetUnitTestImpl();
+ const InternalRunDeathTestFlag* const flag =
+ impl->internal_run_death_test_flag();
+ const int death_test_index = impl->current_test_info()
+ ->increment_death_test_count();
+
+ if (flag != NULL) {
+ if (death_test_index > flag->index()) {
+ DeathTest::set_last_death_test_message(
+ "Death test count (" + StreamableToString(death_test_index)
+ + ") somehow exceeded expected maximum ("
+ + StreamableToString(flag->index()) + ")");
+ return false;
+ }
+
+ if (!(flag->file() == file && flag->line() == line &&
+ flag->index() == death_test_index)) {
+ *test = NULL;
+ return true;
+ }
+ }
+
+# if GTEST_OS_WINDOWS
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+ GTEST_FLAG(death_test_style) == "fast") {
+ *test = new WindowsDeathTest(statement, regex, file, line);
+ }
+
+# else
+
+ if (GTEST_FLAG(death_test_style) == "threadsafe") {
+ *test = new ExecDeathTest(statement, regex, file, line);
+ } else if (GTEST_FLAG(death_test_style) == "fast") {
+ *test = new NoExecDeathTest(statement, regex);
+ }
+
+# endif // GTEST_OS_WINDOWS
+
+ else { // NOLINT - this is more readable than unbalanced brackets inside #if.
+ DeathTest::set_last_death_test_message(
+ "Unknown death test style \"" + GTEST_FLAG(death_test_style)
+ + "\" encountered");
+ return false;
+ }
+
+ return true;
+}
+
+# if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
+int GetStatusFileDescriptor(unsigned int parent_process_id,
+ size_t write_handle_as_size_t,
+ size_t event_handle_as_size_t) {
+ AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+ FALSE, // Non-inheritable.
+ parent_process_id));
+ if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+ DeathTestAbort("Unable to open parent process " +
+ StreamableToString(parent_process_id));
+ }
+
+ // TODO(vladl@google.com): Replace the following check with a
+ // compile-time assertion when available.
+ GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+ const HANDLE write_handle =
+ reinterpret_cast<HANDLE>(write_handle_as_size_t);
+ HANDLE dup_write_handle;
+
+ // The newly initialized handle is accessible only in the parent
+ // process. To obtain one accessible within the child, we need to use
+ // DuplicateHandle.
+ if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+ ::GetCurrentProcess(), &dup_write_handle,
+ 0x0, // Requested privileges ignored since
+ // DUPLICATE_SAME_ACCESS is used.
+ FALSE, // Request non-inheritable handle.
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort("Unable to duplicate the pipe handle " +
+ StreamableToString(write_handle_as_size_t) +
+ " from the parent process " +
+ StreamableToString(parent_process_id));
+ }
+
+ const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+ HANDLE dup_event_handle;
+
+ if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+ ::GetCurrentProcess(), &dup_event_handle,
+ 0x0,
+ FALSE,
+ DUPLICATE_SAME_ACCESS)) {
+ DeathTestAbort("Unable to duplicate the event handle " +
+ StreamableToString(event_handle_as_size_t) +
+ " from the parent process " +
+ StreamableToString(parent_process_id));
+ }
+
+ const int write_fd =
+ ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+ if (write_fd == -1) {
+ DeathTestAbort("Unable to convert pipe handle " +
+ StreamableToString(write_handle_as_size_t) +
+ " to a file descriptor");
+ }
+
+ // Signals the parent that the write end of the pipe has been acquired
+ // so the parent can release its own write end.
+ ::SetEvent(dup_event_handle);
+
+ return write_fd;
+}
+# endif // GTEST_OS_WINDOWS
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
+ if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
+
+ // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
+ // can use it here.
+ int line = -1;
+ int index = -1;
+ ::std::vector< ::std::string> fields;
+ SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
+ int write_fd = -1;
+
+# if GTEST_OS_WINDOWS
+
+ unsigned int parent_process_id = 0;
+ size_t write_handle_as_size_t = 0;
+ size_t event_handle_as_size_t = 0;
+
+ if (fields.size() != 6
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &parent_process_id)
+ || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
+ || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
+ DeathTestAbort("Bad --gtest_internal_run_death_test flag: " +
+ GTEST_FLAG(internal_run_death_test));
+ }
+ write_fd = GetStatusFileDescriptor(parent_process_id,
+ write_handle_as_size_t,
+ event_handle_as_size_t);
+# else
+
+ if (fields.size() != 4
+ || !ParseNaturalNumber(fields[1], &line)
+ || !ParseNaturalNumber(fields[2], &index)
+ || !ParseNaturalNumber(fields[3], &write_fd)) {
+ DeathTestAbort("Bad --gtest_internal_run_death_test flag: "
+ + GTEST_FLAG(internal_run_death_test));
+ }
+
+# endif // GTEST_OS_WINDOWS
+
+ return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
+}
+
+} // namespace internal
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace testing
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest-filepath.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-filepath.cc
new file mode 100644
index 000000000..0292dc119
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-filepath.cc
@@ -0,0 +1,387 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keith.ray@gmail.com (Keith Ray)
+
+#include "gtest/gtest-message.h"
+#include "gtest/internal/gtest-filepath.h"
+#include "gtest/internal/gtest-port.h"
+
+#include <stdlib.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>
+#elif GTEST_OS_WINDOWS
+# include <direct.h>
+# include <io.h>
+#elif GTEST_OS_SYMBIAN
+// Symbian OpenC has PATH_MAX in sys/syslimits.h
+# include <sys/syslimits.h>
+#else
+# include <limits.h>
+# include <climits> // Some Linux distributions define PATH_MAX here.
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_MAX_ _MAX_PATH
+#elif defined(PATH_MAX)
+# define GTEST_PATH_MAX_ PATH_MAX
+#elif defined(_XOPEN_PATH_MAX)
+# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
+#else
+# define GTEST_PATH_MAX_ _POSIX_PATH_MAX
+#endif // GTEST_OS_WINDOWS
+
+#include "gtest/internal/gtest-string.h"
+
+namespace testing {
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
+const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
+const char kAlternatePathSeparatorString[] = "/";
+# if GTEST_OS_WINDOWS_MOBILE
+// Windows CE doesn't have a current directory. You should not use
+// the current directory in tests on Windows CE, but this at least
+// provides a reasonable fallback.
+const char kCurrentDirectoryString[] = "\\";
+// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
+const DWORD kInvalidFileAttributes = 0xffffffff;
+# else
+const char kCurrentDirectoryString[] = ".\\";
+# endif // GTEST_OS_WINDOWS_MOBILE
+#else
+const char kPathSeparator = '/';
+const char kCurrentDirectoryString[] = "./";
+#endif // GTEST_OS_WINDOWS
+
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+ return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+ return c == kPathSeparator;
+#endif
+}
+
+// Returns the current working directory, or "" if unsuccessful.
+FilePath FilePath::GetCurrentDir() {
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+ // Windows CE doesn't have a current directory, so we just return
+ // something reasonable.
+ return FilePath(kCurrentDirectoryString);
+#elif GTEST_OS_WINDOWS
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+ return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#else
+ char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+ char* result = getcwd(cwd, sizeof(cwd));
+# if GTEST_OS_NACL
+ // getcwd will likely fail in NaCl due to the sandbox, so return something
+ // reasonable. The user may have provided a shim implementation for getcwd,
+ // however, so fall back only when failure is detected.
+ return FilePath(result == NULL ? kCurrentDirectoryString : cwd);
+# endif // GTEST_OS_NACL
+ return FilePath(result == NULL ? "" : cwd);
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns a copy of the FilePath with the case-insensitive extension removed.
+// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+// FilePath("dir/file"). If a case-insensitive extension is not
+// found, returns a copy of the original FilePath.
+FilePath FilePath::RemoveExtension(const char* extension) const {
+ const std::string dot_extension = std::string(".") + extension;
+ if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) {
+ return FilePath(pathname_.substr(
+ 0, pathname_.length() - dot_extension.length()));
+ }
+ return *this;
+}
+
+// Returns a pointer to the last occurrence of a valid path separator in
+// the FilePath. On Windows, for example, both '/' and '\' are valid path
+// separators. Returns NULL if no path separator was found.
+const char* FilePath::FindLastPathSeparator() const {
+ const char* const last_sep = strrchr(c_str(), kPathSeparator);
+#if GTEST_HAS_ALT_PATH_SEP_
+ const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
+ // Comparing two pointers of which only one is NULL is undefined.
+ if (last_alt_sep != NULL &&
+ (last_sep == NULL || last_alt_sep > last_sep)) {
+ return last_alt_sep;
+ }
+#endif
+ return last_sep;
+}
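+
+// Editorial example: on POSIX, FilePath("dir/sub/file").FindLastPathSeparator()
+// returns a pointer to the '/' before "file"; on Windows the later of the
+// last '\\' and the last '/' wins.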
+
+// Returns a copy of the FilePath with the directory part removed.
+// Example: FilePath("path/to/file").RemoveDirectoryName() returns
+// FilePath("file"). If there is no directory part ("just_a_file"), it returns
+// the FilePath unmodified. If there is no file part ("just_a_dir/") it
+// returns an empty FilePath ("").
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveDirectoryName() const {
+ const char* const last_sep = FindLastPathSeparator();
+ return last_sep ? FilePath(last_sep + 1) : *this;
+}
+
+// RemoveFileName returns the directory path with the filename removed.
+// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+// If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveFileName() const {
+ const char* const last_sep = FindLastPathSeparator();
+ std::string dir;
+ if (last_sep) {
+ dir = std::string(c_str(), last_sep + 1 - c_str());
+ } else {
+ dir = kCurrentDirectoryString;
+ }
+ return FilePath(dir);
+}
+
+// Helper functions for naming files in a directory for xml output.
+
+// Given directory = "dir", base_name = "test", number = 0,
+// extension = "xml", returns "dir/test.xml". If number is greater
+// than zero (e.g., 12), returns "dir/test_12.xml".
+// On Windows platform, uses \ as the separator rather than /.
+FilePath FilePath::MakeFileName(const FilePath& directory,
+ const FilePath& base_name,
+ int number,
+ const char* extension) {
+ std::string file;
+ if (number == 0) {
+ file = base_name.string() + "." + extension;
+ } else {
+ file = base_name.string() + "_" + StreamableToString(number)
+ + "." + extension;
+ }
+ return ConcatPaths(directory, FilePath(file));
+}
+
+// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml".
+// On Windows, uses \ as the separator rather than /.
+FilePath FilePath::ConcatPaths(const FilePath& directory,
+ const FilePath& relative_path) {
+ if (directory.IsEmpty())
+ return relative_path;
+ const FilePath dir(directory.RemoveTrailingPathSeparator());
+ return FilePath(dir.string() + kPathSeparator + relative_path.string());
+}
+
+// Returns true if pathname describes something findable in the file-system,
+// whether a file, a directory, or another filesystem object.
+bool FilePath::FileOrDirectoryExists() const {
+#if GTEST_OS_WINDOWS_MOBILE
+ LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str());
+ const DWORD attributes = GetFileAttributes(unicode);
+ delete [] unicode;
+ return attributes != kInvalidFileAttributes;
+#else
+ posix::StatStruct file_stat;
+ return posix::Stat(pathname_.c_str(), &file_stat) == 0;
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns true if pathname describes a directory in the file-system
+// that exists.
+bool FilePath::DirectoryExists() const {
+ bool result = false;
+#if GTEST_OS_WINDOWS
+ // Don't strip off trailing separator if path is a root directory on
+ // Windows (like "C:\\").
+ const FilePath& path(IsRootDirectory() ? *this :
+ RemoveTrailingPathSeparator());
+#else
+ const FilePath& path(*this);
+#endif
+
+#if GTEST_OS_WINDOWS_MOBILE
+ LPCWSTR unicode = String::AnsiToUtf16(path.c_str());
+ const DWORD attributes = GetFileAttributes(unicode);
+ delete [] unicode;
+ if ((attributes != kInvalidFileAttributes) &&
+ (attributes & FILE_ATTRIBUTE_DIRECTORY)) {
+ result = true;
+ }
+#else
+ posix::StatStruct file_stat;
+ result = posix::Stat(path.c_str(), &file_stat) == 0 &&
+ posix::IsDir(file_stat);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ return result;
+}
+
+// Returns true if pathname describes a root directory. (Windows has one
+// root directory per disk drive.)
+bool FilePath::IsRootDirectory() const {
+#if GTEST_OS_WINDOWS
+ // TODO(wan@google.com): on Windows a network share like
+ // \\server\share can be a root directory, although it cannot be the
+ // current directory. Handle this properly.
+ return pathname_.length() == 3 && IsAbsolutePath();
+#else
+ return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]);
+#endif
+}
+
+// Returns true if pathname describes an absolute path.
+bool FilePath::IsAbsolutePath() const {
+ const char* const name = pathname_.c_str();
+#if GTEST_OS_WINDOWS
+ return pathname_.length() >= 3 &&
+ ((name[0] >= 'a' && name[0] <= 'z') ||
+ (name[0] >= 'A' && name[0] <= 'Z')) &&
+ name[1] == ':' &&
+ IsPathSeparator(name[2]);
+#else
+ return IsPathSeparator(name[0]);
+#endif
+}
+
+// Returns a pathname for a file that does not currently exist. The pathname
+// will be directory/base_name.extension or
+// directory/base_name_<number>.extension if directory/base_name.extension
+// already exists. The number will be incremented until a pathname is found
+// that does not already exist.
+// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'.
+// There could be a race condition if two or more processes are calling this
+// function at the same time -- they could both pick the same filename.
+FilePath FilePath::GenerateUniqueFileName(const FilePath& directory,
+ const FilePath& base_name,
+ const char* extension) {
+ FilePath full_pathname;
+ int number = 0;
+ do {
+ full_pathname.Set(MakeFileName(directory, base_name, number++, extension));
+ } while (full_pathname.FileOrDirectoryExists());
+ return full_pathname;
+}
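+
+// Example (illustrative): if "dir/foo_test.xml" already exists on disk,
+//
+//   FilePath::GenerateUniqueFileName(FilePath("dir"),
+//                                    FilePath("foo_test"), "xml");
+//
+// probes "dir/foo_test.xml" first, then returns FilePath("dir/foo_test_1.xml").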
+
+// Returns true if FilePath ends with a path separator, which indicates that
+// it is intended to represent a directory. Returns false otherwise.
+// This does NOT check that a directory (or file) actually exists.
+bool FilePath::IsDirectory() const {
+ return !pathname_.empty() &&
+ IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]);
+}
+
+// Create directories so that path exists. Returns true if successful or if
+// the directories already exist; returns false if unable to create directories
+// for any reason.
+bool FilePath::CreateDirectoriesRecursively() const {
+ if (!this->IsDirectory()) {
+ return false;
+ }
+
+ if (pathname_.length() == 0 || this->DirectoryExists()) {
+ return true;
+ }
+
+ const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName());
+ return parent.CreateDirectoriesRecursively() && this->CreateFolder();
+}
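+
+// Example (illustrative): assuming none of the directories exist yet,
+//
+//   FilePath("a/b/c/").CreateDirectoriesRecursively();
+//
+// recurses through "a/b/" and "a/" down to "./" (which already exists),
+// then creates "a", "a/b", and "a/b/c" on the way back out.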
+
+// Create the directory so that path exists. Returns true if successful or
+// if the directory already exists; returns false if unable to create the
+// directory for any reason, including if the parent directory does not
+// exist. Not named "CreateDirectory" because that's a macro on Windows.
+bool FilePath::CreateFolder() const {
+#if GTEST_OS_WINDOWS_MOBILE
+ FilePath removed_sep(this->RemoveTrailingPathSeparator());
+ LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str());
+ int result = CreateDirectory(unicode, NULL) ? 0 : -1;
+ delete [] unicode;
+#elif GTEST_OS_WINDOWS
+ int result = _mkdir(pathname_.c_str());
+#else
+ int result = mkdir(pathname_.c_str(), 0777);
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+ if (result == -1) {
+ return this->DirectoryExists(); // An error is OK if the directory exists.
+ }
+ return true; // No error.
+}
+
+// If input name has a trailing separator character, remove it and return the
+// name, otherwise return the name string unmodified.
+// On Windows platform, uses \ as the separator, other platforms use /.
+FilePath FilePath::RemoveTrailingPathSeparator() const {
+ return IsDirectory()
+ ? FilePath(pathname_.substr(0, pathname_.length() - 1))
+ : *this;
+}
+
+// Removes any redundant separators that might be in the pathname.
+// For example, "bar///foo" becomes "bar/foo". Does not eliminate other
+// redundancies that might be in a pathname involving "." or "..".
+// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share).
+void FilePath::Normalize() {
+ if (pathname_.c_str() == NULL) {
+ pathname_ = "";
+ return;
+ }
+ const char* src = pathname_.c_str();
+ char* const dest = new char[pathname_.length() + 1];
+ char* dest_ptr = dest;
+ memset(dest_ptr, 0, pathname_.length() + 1);
+
+ while (*src != '\0') {
+ *dest_ptr = *src;
+ if (!IsPathSeparator(*src)) {
+ src++;
+ } else {
+#if GTEST_HAS_ALT_PATH_SEP_
+ if (*dest_ptr == kAlternatePathSeparator) {
+ *dest_ptr = kPathSeparator;
+ }
+#endif
+ while (IsPathSeparator(*src))
+ src++;
+ }
+ dest_ptr++;
+ }
+ *dest_ptr = '\0';
+ pathname_ = dest;
+ delete[] dest;
+}
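+
+// Examples (illustrative):
+//
+//   FilePath p("bar///foo");
+//   p.Normalize();  // p is now "bar/foo" on POSIX.
+//
+// On Windows, "bar//foo" normalizes to "bar\foo": runs of separators are
+// collapsed and the alternate separator '/' is rewritten to '\\'.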
+
+} // namespace internal
+} // namespace testing
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest-internal-inl.h b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-internal-inl.h
new file mode 100644
index 000000000..ed8a682a9
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-internal-inl.h
@@ -0,0 +1,1183 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions and classes used by the Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// This file contains purely Google Test's internal implementation. Please
+// DO NOT #INCLUDE IT IN A USER PROGRAM.
+
+#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
+#define GTEST_SRC_GTEST_INTERNAL_INL_H_
+
+// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
+// part of Google Test's implementation; otherwise it's undefined.
+#if !GTEST_IMPLEMENTATION_
+// If this file is included from the user's code, just say no.
+# error "gtest-internal-inl.h is part of Google Test's internal implementation."
+# error "It must not be included except by Google Test itself."
+#endif // GTEST_IMPLEMENTATION_
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif // !_WIN32_WCE
+#include <stddef.h>
+#include <stdlib.h> // For strtoll/_strtoul64/malloc/free.
+#include <string.h> // For memmove.
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "gtest/internal/gtest-port.h"
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h> // NOLINT
+# include <netdb.h> // NOLINT
+#endif
+
+#if GTEST_OS_WINDOWS
+# include <windows.h> // NOLINT
+#endif // GTEST_OS_WINDOWS
+
+#include "gtest/gtest.h" // NOLINT
+#include "gtest/gtest-spi.h"
+
+namespace testing {
+
+// Declares the flags.
+//
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
+GTEST_DECLARE_bool_(death_test_use_fork);
+
+namespace internal {
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
+const char kBreakOnFailureFlag[] = "break_on_failure";
+const char kCatchExceptionsFlag[] = "catch_exceptions";
+const char kColorFlag[] = "color";
+const char kFilterFlag[] = "filter";
+const char kListTestsFlag[] = "list_tests";
+const char kOutputFlag[] = "output";
+const char kPrintTimeFlag[] = "print_time";
+const char kRandomSeedFlag[] = "random_seed";
+const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kStreamResultToFlag[] = "stream_result_to";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+const char kFlagfileFlag[] = "flagfile";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true iff Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Converts the given time in milliseconds to a date string in the ISO 8601
+// format, without the timezone information. N.B.: due to the use of the
+// non-reentrant localtime() function, this function is not thread safe. Do
+// not use it in any code that can be called from multiple threads.
+GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+ const char* str, const char* flag, Int32* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
+ const unsigned int raw_seed = (random_seed_flag == 0) ?
+ static_cast<unsigned int>(GetTimeInMillis()) :
+ static_cast<unsigned int>(random_seed_flag);
+
+ // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+ // it's easy to type.
+ const int normalized_seed =
+ static_cast<int>((raw_seed - 1U) %
+ static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+ return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'. The behavior is
+// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+ GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+ << "Invalid random seed " << seed << " - must be in [1, "
+ << kMaxRandomSeed << "].";
+ const int next_seed = seed + 1;
+ return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
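+
+// Examples (illustrative) of the two helpers above:
+//
+//   GetRandomSeedFromFlag(0)       // time-based seed in [1, kMaxRandomSeed]
+//   GetRandomSeedFromFlag(100000)  // (100000 - 1) % 99999 + 1 == 1
+//   GetNextRandomSeed(99999)       // wraps around to 1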
+
+// This class saves the values of all Google Test flags in its c'tor, and
+// restores them in its d'tor.
+class GTestFlagSaver {
+ public:
+ // The c'tor.
+ GTestFlagSaver() {
+ also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
+ break_on_failure_ = GTEST_FLAG(break_on_failure);
+ catch_exceptions_ = GTEST_FLAG(catch_exceptions);
+ color_ = GTEST_FLAG(color);
+ death_test_style_ = GTEST_FLAG(death_test_style);
+ death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
+ filter_ = GTEST_FLAG(filter);
+ internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
+ list_tests_ = GTEST_FLAG(list_tests);
+ output_ = GTEST_FLAG(output);
+ print_time_ = GTEST_FLAG(print_time);
+ random_seed_ = GTEST_FLAG(random_seed);
+ repeat_ = GTEST_FLAG(repeat);
+ shuffle_ = GTEST_FLAG(shuffle);
+ stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+ stream_result_to_ = GTEST_FLAG(stream_result_to);
+ throw_on_failure_ = GTEST_FLAG(throw_on_failure);
+ }
+
+ // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS.
+ ~GTestFlagSaver() {
+ GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
+ GTEST_FLAG(break_on_failure) = break_on_failure_;
+ GTEST_FLAG(catch_exceptions) = catch_exceptions_;
+ GTEST_FLAG(color) = color_;
+ GTEST_FLAG(death_test_style) = death_test_style_;
+ GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
+ GTEST_FLAG(filter) = filter_;
+ GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
+ GTEST_FLAG(list_tests) = list_tests_;
+ GTEST_FLAG(output) = output_;
+ GTEST_FLAG(print_time) = print_time_;
+ GTEST_FLAG(random_seed) = random_seed_;
+ GTEST_FLAG(repeat) = repeat_;
+ GTEST_FLAG(shuffle) = shuffle_;
+ GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+ GTEST_FLAG(stream_result_to) = stream_result_to_;
+ GTEST_FLAG(throw_on_failure) = throw_on_failure_;
+ }
+
+ private:
+ // Fields for saving the original values of flags.
+ bool also_run_disabled_tests_;
+ bool break_on_failure_;
+ bool catch_exceptions_;
+ std::string color_;
+ std::string death_test_style_;
+ bool death_test_use_fork_;
+ std::string filter_;
+ std::string internal_run_death_test_;
+ bool list_tests_;
+ std::string output_;
+ bool print_time_;
+ internal::Int32 random_seed_;
+ internal::Int32 repeat_;
+ bool shuffle_;
+ internal::Int32 stack_trace_depth_;
+ std::string stream_result_to_;
+ bool throw_on_failure_;
+} GTEST_ATTRIBUTE_UNUSED_;
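+
+// Example (illustrative) of the RAII pattern above:
+//
+//   {
+//     GTestFlagSaver saver;    // snapshots every GTEST_FLAG value
+//     GTEST_FLAG(repeat) = 5;  // temporary override
+//     ...
+//   }                          // d'tor restores repeat and all other flags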
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+GTEST_API_ std::string CodePointToUtf8(UInt32 code_point);
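+
+// Examples (illustrative), assuming standard UTF-8 encoding:
+//
+//   CodePointToUtf8(0x41)    // "A"
+//   CodePointToUtf8(0x4E2D)  // "\xE4\xB8\xAD" (U+4E2D)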
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars);
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded();
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (e.g., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+GTEST_API_ bool ShouldShard(const char* total_shards_str,
+ const char* shard_index_str,
+ bool in_subprocess_for_death_test);
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error and
+// aborts.
+GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+GTEST_API_ bool ShouldRunTestOnShard(
+ int total_shards, int shard_index, int test_id);
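+
+// Round-robin sketch (illustrative; may differ from the actual
+// implementation) of the rule above: with total_shards == 3, shard 1 runs
+// test ids 1, 4, 7, ...
+//
+//   return (test_id % total_shards) == shard_index;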
+
+// STL container utilities.
+
+// Returns the number of elements in the given container that satisfy
+// the given predicate.
+template <class Container, typename Predicate>
+inline int CountIf(const Container& c, Predicate predicate) {
+ // Implemented as an explicit loop since std::count_if() in libCstd on
+ // Solaris has a non-standard signature.
+ int count = 0;
+ for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {
+ if (predicate(*it))
+ ++count;
+ }
+ return count;
+}
+
+// Applies a function/functor to each element in the container.
+template <class Container, typename Functor>
+void ForEach(const Container& c, Functor functor) {
+ std::for_each(c.begin(), c.end(), functor);
+}
+
+// Returns the i-th element of the vector, or default_value if i is not
+// in range [0, v.size()).
+template <typename E>
+inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
+ return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
+}
+
+// Performs an in-place shuffle of a range of the vector's elements.
+// 'begin' and 'end' are element indices as an STL-style range;
+// i.e. [begin, end) are shuffled, where 'end' == size() means to
+// shuffle to the end of the vector.
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+ std::vector<E>* v) {
+ const int size = static_cast<int>(v->size());
+ GTEST_CHECK_(0 <= begin && begin <= size)
+ << "Invalid shuffle range start " << begin << ": must be in range [0, "
+ << size << "].";
+ GTEST_CHECK_(begin <= end && end <= size)
+ << "Invalid shuffle range finish " << end << ": must be in range ["
+ << begin << ", " << size << "].";
+
+ // Fisher-Yates shuffle, from
+ // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+ for (int range_width = end - begin; range_width >= 2; range_width--) {
+ const int last_in_range = begin + range_width - 1;
+ const int selected = begin + random->Generate(range_width);
+ std::swap((*v)[selected], (*v)[last_in_range]);
+ }
+}
+
+// Performs an in-place shuffle of the vector's elements.
+template <typename E>
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+ ShuffleRange(random, 0, static_cast<int>(v->size()), v);
+}
+
+// A function for deleting an object. Handy for being used as a
+// functor.
+template <typename T>
+static void Delete(T* x) {
+ delete x;
+}
+
+// A predicate that checks the key of a TestProperty against a known key.
+//
+// TestPropertyKeyIs is copyable.
+class TestPropertyKeyIs {
+ public:
+ // Constructor.
+ //
+ // TestPropertyKeyIs has NO default constructor.
+ explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}
+
+ // Returns true iff the key of the test property matches key_.
+ bool operator()(const TestProperty& test_property) const {
+ return test_property.key() == key_;
+ }
+
+ private:
+ std::string key_;
+};
+
+// Class UnitTestOptions.
+//
+// This class contains functions for processing options the user
+// specifies when running the tests. It has only static members.
+//
+// In most cases, the user can specify an option using either an
+// environment variable or a command line flag. E.g. you can set the
+// test filter using either GTEST_FILTER or --gtest_filter. If both
+// the variable and the flag are present, the latter overrides the
+// former.
+class GTEST_API_ UnitTestOptions {
+ public:
+ // Functions for processing the gtest_output flag.
+
+ // Returns the output format, or "" for normal printed output.
+ static std::string GetOutputFormat();
+
+ // Returns the absolute path of the requested output file, or the
+ // default (test_detail.xml in the original working directory) if
+ // none was explicitly specified.
+ static std::string GetAbsolutePathToOutputFile();
+
+ // Functions for processing the gtest_filter flag.
+
+ // Returns true iff the wildcard pattern matches the string. The
+ // first ':' or '\0' character in pattern marks the end of it.
+ //
+ // This recursive algorithm isn't very efficient, but is clear and
+ // works well enough for matching test names, which are short.
+ static bool PatternMatchesString(const char *pattern, const char *str);
+
+ // Returns true iff the user-specified filter matches the test case
+ // name and the test name.
+ static bool FilterMatchesTest(const std::string &test_case_name,
+ const std::string &test_name);
+
+#if GTEST_OS_WINDOWS
+ // Function for supporting the gtest_catch_exception flag.
+
+ // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+ // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+ // This function is useful as an __except condition.
+ static int GTestShouldProcessSEH(DWORD exception_code);
+#endif // GTEST_OS_WINDOWS
+
+ // Returns true if "name" matches the ':' separated list of glob-style
+ // filters in "filter".
+ static bool MatchesFilter(const std::string& name, const char* filter);
+};
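+
+// Examples (illustrative) of MatchesFilter() semantics:
+//
+//   UnitTestOptions::MatchesFilter("FooTest.Bar", "FooTest.*:BazTest.*")  // true
+//   UnitTestOptions::MatchesFilter("QuxTest.Bar", "FooTest.*:BazTest.*")  // false
+//   UnitTestOptions::MatchesFilter("FooTest.Bar", "*.B?r")                // true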
+
+// Returns the current application's name, removing directory path if that
+// is present. Used by UnitTestOptions::GetOutputFile.
+GTEST_API_ FilePath GetCurrentExecutableName();
+
+// The role interface for getting the OS stack trace as a string.
+class OsStackTraceGetterInterface {
+ public:
+ OsStackTraceGetterInterface() {}
+ virtual ~OsStackTraceGetterInterface() {}
+
+ // Returns the current OS stack trace as an std::string. Parameters:
+ //
+ // max_depth - the maximum number of stack frames to be included
+ // in the trace.
+ // skip_count - the number of top frames to be skipped; doesn't count
+ // against max_depth.
+ virtual string CurrentStackTrace(int max_depth, int skip_count) = 0;
+
+ // UponLeavingGTest() should be called immediately before Google Test calls
+ // user code. It saves some information about the current stack that
+ // CurrentStackTrace() will use to find and hide Google Test stack frames.
+ virtual void UponLeavingGTest() = 0;
+
+ // This string is inserted in place of stack frames that are part of
+ // Google Test's implementation.
+ static const char* const kElidedFramesMarker;
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
+};
+
+// A working implementation of the OsStackTraceGetterInterface interface.
+class OsStackTraceGetter : public OsStackTraceGetterInterface {
+ public:
+ OsStackTraceGetter() {}
+
+ virtual string CurrentStackTrace(int max_depth, int skip_count);
+ virtual void UponLeavingGTest();
+
+ private:
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
+};
+
+// Information about a Google Test trace point.
+struct TraceInfo {
+ const char* file;
+ int line;
+ std::string message;
+};
+
+// This is the default global test part result reporter used in UnitTestImpl.
+// This class should only be used by UnitTestImpl.
+class DefaultGlobalTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
+ // Implements the TestPartResultReporterInterface. Reports the test part
+ // result in the current test.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+ UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
+};
+
+// This is the default per thread test part result reporter used in
+// UnitTestImpl. This class should only be used by UnitTestImpl.
+class DefaultPerThreadTestPartResultReporter
+ : public TestPartResultReporterInterface {
+ public:
+ explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
+ // Implements the TestPartResultReporterInterface. The implementation just
+ // delegates to the current global test part result reporter of *unit_test_.
+ virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+ UnitTestImpl* const unit_test_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
+};
+
+// The private implementation of the UnitTest class. We don't protect
+// the methods under a mutex, as this class is not accessible by a
+// user and the UnitTest class that delegates work to this class does
+// proper locking.
+class GTEST_API_ UnitTestImpl {
+ public:
+ explicit UnitTestImpl(UnitTest* parent);
+ virtual ~UnitTestImpl();
+
+ // There are two different ways to register your own TestPartResultReporter.
+ // You can register your own reporter to listen either only for test results
+ // from the current thread or for results from all threads.
+ // By default, each per-thread test result reporter just passes a new
+ // TestPartResult to the global test result reporter, which registers the
+ // test part result for the currently running test.
+
+ // Returns the global test part result reporter.
+ TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
+
+ // Sets the global test part result reporter.
+ void SetGlobalTestPartResultReporter(
+ TestPartResultReporterInterface* reporter);
+
+ // Returns the test part result reporter for the current thread.
+ TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
+
+ // Sets the test part result reporter for the current thread.
+ void SetTestPartResultReporterForCurrentThread(
+ TestPartResultReporterInterface* reporter);
+
+ // Gets the number of successful test cases.
+ int successful_test_case_count() const;
+
+ // Gets the number of failed test cases.
+ int failed_test_case_count() const;
+
+ // Gets the number of all test cases.
+ int total_test_case_count() const;
+
+ // Gets the number of all test cases that contain at least one test
+ // that should run.
+ int test_case_to_run_count() const;
+
+ // Gets the number of successful tests.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Gets the number of all tests.
+ int total_test_count() const;
+
+ // Gets the number of tests that should run.
+ int test_to_run_count() const;
+
+ // Gets the time of the test program start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp() const { return start_timestamp_; }
+
+ // Gets the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns true iff the unit test passed (i.e. all test cases passed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the unit test failed (i.e. some test case failed
+ // or something outside of all tests failed).
+ bool Failed() const {
+ return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
+ }
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ const TestCase* GetTestCase(int i) const {
+ const int index = GetElementOr(test_case_indices_, i, -1);
+ return index < 0 ? NULL : test_cases_[index];
+ }
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i) {
+ const int index = GetElementOr(test_case_indices_, i, -1);
+ return index < 0 ? NULL : test_cases_[index];
+ }
+
+ // Provides access to the event listener list.
+ TestEventListeners* listeners() { return &listeners_; }
+
+ // Returns the TestResult for the test that's currently running, or
+ // the TestResult for the ad hoc test if no test is running.
+ TestResult* current_test_result();
+
+ // Returns the TestResult for the ad hoc test.
+ const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
+
+ // Sets the OS stack trace getter.
+ //
+ // Does nothing if the input and the current OS stack trace getter
+ // are the same; otherwise, deletes the old getter and makes the
+ // input the current getter.
+ void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
+
+ // Returns the current OS stack trace getter if it is not NULL;
+ // otherwise, creates an OsStackTraceGetter, makes it the current
+ // getter, and returns it.
+ OsStackTraceGetterInterface* os_stack_trace_getter();
+
+ // Returns the current OS stack trace as an std::string.
+ //
+ // The maximum number of stack frames to be included is specified by
+ // the gtest_stack_trace_depth flag. The skip_count parameter
+ // specifies the number of top frames to be skipped, which doesn't
+ // count against the number of frames to be included.
+ //
+ // For example, if Foo() calls Bar(), which in turn calls
+ // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+ // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+ std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_;
+
+ // Finds and returns a TestCase with the given name. If one doesn't
+ // exist, creates one and returns it.
+ //
+ // Arguments:
+ //
+ // test_case_name: name of the test case
+ // type_param: the name of the test's type parameter, or NULL if
+ // this is not a typed or a type-parameterized test.
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase* GetTestCase(const char* test_case_name,
+ const char* type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Adds a TestInfo to the unit test.
+ //
+ // Arguments:
+ //
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ // test_info: the TestInfo object
+ void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc,
+ TestInfo* test_info) {
+ // In order to support thread-safe death tests, we need to
+ // remember the original working directory when the test program
+ // was first invoked. We cannot do this in RUN_ALL_TESTS(), as
+ // the user may have changed the current directory before calling
+ // RUN_ALL_TESTS(). Therefore we capture the current directory in
+ // AddTestInfo(), which is called to register a TEST or TEST_F
+ // before main() is reached.
+ if (original_working_dir_.IsEmpty()) {
+ original_working_dir_.Set(FilePath::GetCurrentDir());
+ GTEST_CHECK_(!original_working_dir_.IsEmpty())
+ << "Failed to get the current working directory.";
+ }
+
+ GetTestCase(test_info->test_case_name(),
+ test_info->type_param(),
+ set_up_tc,
+ tear_down_tc)->AddTestInfo(test_info);
+ }
+
+#if GTEST_HAS_PARAM_TEST
+ // Returns ParameterizedTestCaseRegistry object used to keep track of
+ // value-parameterized tests and instantiate and register them.
+ internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
+ return parameterized_test_registry_;
+ }
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Sets the TestCase object for the test that's currently running.
+ void set_current_test_case(TestCase* a_current_test_case) {
+ current_test_case_ = a_current_test_case;
+ }
+
+ // Sets the TestInfo object for the test that's currently running. If
+ // current_test_info is NULL, the assertion results will be stored in
+ // ad_hoc_test_result_.
+ void set_current_test_info(TestInfo* a_current_test_info) {
+ current_test_info_ = a_current_test_info;
+ }
+
+ // Registers all parameterized tests defined using TEST_P and
+ // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter
+ // combination. This method can be called more than once; it has guards
+ // that prevent registering the tests more than once. If
+ // value-parameterized tests are disabled, RegisterParameterizedTests is
+ // present but does nothing.
+ void RegisterParameterizedTests();
+
+ // Runs all tests in this UnitTest object, prints the result, and
+ // returns true if all tests are successful. If any exception is
+ // thrown during a test, this test is considered to be failed, but
+ // the rest of the tests will still be run.
+ bool RunAllTests();
+
+ // Clears the results of all tests, except the ad hoc tests.
+ void ClearNonAdHocTestResult() {
+ ForEach(test_cases_, TestCase::ClearTestCaseResult);
+ }
+
+ // Clears the results of ad-hoc test assertions.
+ void ClearAdHocTestResult() {
+ ad_hoc_test_result_.Clear();
+ }
+
+ // Adds a TestProperty to the current TestResult object when invoked in a
+ // context of a test or a test case, or to the global property set. If the
+ // result already contains a property with the same key, the value will be
+ // updated.
+ void RecordProperty(const TestProperty& test_property);
+
+ enum ReactionToSharding {
+ HONOR_SHARDING_PROTOCOL,
+ IGNORE_SHARDING_PROTOCOL
+ };
+
+ // Matches the full name of each test against the user-specified
+ // filter to decide whether the test should run, then records the
+ // result in each TestCase and TestInfo object.
+ // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests
+ // based on sharding variables in the environment.
+ // Returns the number of tests that should run.
+ int FilterTests(ReactionToSharding shard_tests);
+
+ // Prints the names of the tests matching the user-specified filter flag.
+ void ListTestsMatchingFilter();
+
+ const TestCase* current_test_case() const { return current_test_case_; }
+ TestInfo* current_test_info() { return current_test_info_; }
+ const TestInfo* current_test_info() const { return current_test_info_; }
+
+ // Returns the vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*>& environments() { return environments_; }
+
+ // Getters for the per-thread Google Test trace stack.
+ std::vector<TraceInfo>& gtest_trace_stack() {
+ return *(gtest_trace_stack_.pointer());
+ }
+ const std::vector<TraceInfo>& gtest_trace_stack() const {
+ return gtest_trace_stack_.get();
+ }
+
+#if GTEST_HAS_DEATH_TEST
+ void InitDeathTestSubprocessControlInfo() {
+ internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag());
+ }
+ // Returns a pointer to the parsed --gtest_internal_run_death_test
+ // flag, or NULL if that flag was not specified.
+ // This information is useful only in a death test child process.
+ // Must not be called before a call to InitGoogleTest.
+ const InternalRunDeathTestFlag* internal_run_death_test_flag() const {
+ return internal_run_death_test_flag_.get();
+ }
+
+ // Returns a pointer to the current death test factory.
+ internal::DeathTestFactory* death_test_factory() {
+ return death_test_factory_.get();
+ }
+
+ void SuppressTestEventsIfInSubprocess();
+
+ friend class ReplaceDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Initializes the event listener performing XML output as specified by
+ // UnitTestOptions. Must not be called before InitGoogleTest.
+ void ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+ // Initializes the event listener for streaming test results to a socket.
+ // Must not be called before InitGoogleTest.
+ void ConfigureStreamingOutput();
+#endif
+
+ // Performs initialization dependent upon flag values obtained in
+ // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+ // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
+ // this function is also called from RunAllTests. Since this function can be
+ // called more than once, it has to be idempotent.
+ void PostFlagParsingInit();
+
+ // Gets the random seed used at the start of the current test iteration.
+ int random_seed() const { return random_seed_; }
+
+ // Gets the random number generator.
+ internal::Random* random() { return &random_; }
+
+ // Shuffles all test cases, and the tests within each test case,
+ // making sure that death tests are still run first.
+ void ShuffleTests();
+
+ // Restores the test cases and tests to their order before the first shuffle.
+ void UnshuffleTests();
+
+ // Returns the value of GTEST_FLAG(catch_exceptions) at the moment
+ // UnitTest::Run() starts.
+ bool catch_exceptions() const { return catch_exceptions_; }
+
+ private:
+ friend class ::testing::UnitTest;
+
+ // Used by UnitTest::Run() to capture the state of
+ // GTEST_FLAG(catch_exceptions) at the moment it starts.
+ void set_catch_exceptions(bool value) { catch_exceptions_ = value; }
+
+ // The UnitTest object that owns this implementation object.
+ UnitTest* const parent_;
+
+ // The working directory when the first TEST() or TEST_F() was
+ // executed.
+ internal::FilePath original_working_dir_;
+
+ // The default test part result reporters.
+ DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_;
+ DefaultPerThreadTestPartResultReporter
+ default_per_thread_test_part_result_reporter_;
+
+ // Points to (but doesn't own) the global test part result reporter.
+ TestPartResultReporterInterface* global_test_part_result_repoter_;
+
+ // Protects read and write access to global_test_part_result_reporter_.
+ internal::Mutex global_test_part_result_reporter_mutex_;
+
+ // Points to (but doesn't own) the per-thread test part result reporter.
+ internal::ThreadLocal<TestPartResultReporterInterface*>
+ per_thread_test_part_result_reporter_;
+
+ // The vector of environments that need to be set-up/torn-down
+ // before/after the tests are run.
+ std::vector<Environment*> environments_;
+
+ // The vector of TestCases in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestCase*> test_cases_;
+
+ // Provides a level of indirection for the test case list to allow
+ // easy shuffling and restoring the test case order. The i-th
+ // element of this vector is the index of the i-th test case in the
+ // shuffled order.
+ std::vector<int> test_case_indices_;
+
+#if GTEST_HAS_PARAM_TEST
+ // ParameterizedTestRegistry object used to register value-parameterized
+ // tests.
+ internal::ParameterizedTestCaseRegistry parameterized_test_registry_;
+
+ // Indicates whether RegisterParameterizedTests() has been called already.
+ bool parameterized_tests_registered_;
+#endif // GTEST_HAS_PARAM_TEST
+
+ // Index of the last death test case registered. Initially -1.
+ int last_death_test_case_;
+
+ // This points to the TestCase for the currently running test. It
+ // changes as Google Test goes through one test case after another.
+ // When no test is running, this is set to NULL and Google Test
+ // stores assertion results in ad_hoc_test_result_. Initially NULL.
+ TestCase* current_test_case_;
+
+ // This points to the TestInfo for the currently running test. It
+ // changes as Google Test goes through one test after another. When
+ // no test is running, this is set to NULL and Google Test stores
+ // assertion results in ad_hoc_test_result_. Initially NULL.
+ TestInfo* current_test_info_;
+
+ // Normally, a user only writes assertions inside a TEST or TEST_F,
+ // or inside a function called by a TEST or TEST_F. Since Google
+ // Test keeps track of which test is currently running, it can
+ // associate such an assertion with the test it belongs to.
+ //
+ // If an assertion is encountered when no TEST or TEST_F is running,
+ // Google Test attributes the assertion result to an imaginary "ad hoc"
+ // test, and records the result in ad_hoc_test_result_.
+ TestResult ad_hoc_test_result_;
+
+ // The list of event listeners that can be used to track events inside
+ // Google Test.
+ TestEventListeners listeners_;
+
+ // The OS stack trace getter. Will be deleted when the UnitTest
+ // object is destructed. By default, an OsStackTraceGetter is used,
+ // but the user can set this field to use a custom getter if that is
+ // desired.
+ OsStackTraceGetterInterface* os_stack_trace_getter_;
+
+ // True iff PostFlagParsingInit() has been called.
+ bool post_flag_parse_init_performed_;
+
+ // The random number seed used at the beginning of the test run.
+ int random_seed_;
+
+ // Our random number generator.
+ internal::Random random_;
+
+ // The time of the test program start, in ms from the start of the
+ // UNIX epoch.
+ TimeInMillis start_timestamp_;
+
+ // How long the test took to run, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+#if GTEST_HAS_DEATH_TEST
+ // The decomposed components of the gtest_internal_run_death_test flag,
+ // parsed when RUN_ALL_TESTS is called.
+ internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_;
+ internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_;
+#endif // GTEST_HAS_DEATH_TEST
+
+ // A per-thread stack of traces created by the SCOPED_TRACE() macro.
+ internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_;
+
+ // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests()
+ // starts.
+ bool catch_exceptions_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl);
+}; // class UnitTestImpl
+
+// Convenience function for accessing the global UnitTest
+// implementation object.
+inline UnitTestImpl* GetUnitTestImpl() {
+ return UnitTest::GetInstance()->impl();
+}
+
+#if GTEST_USES_SIMPLE_RE
+
+// Internal helper functions for implementing the simple regular
+// expression matcher.
+GTEST_API_ bool IsInSet(char ch, const char* str);
+GTEST_API_ bool IsAsciiDigit(char ch);
+GTEST_API_ bool IsAsciiPunct(char ch);
+GTEST_API_ bool IsRepeat(char ch);
+GTEST_API_ bool IsAsciiWhiteSpace(char ch);
+GTEST_API_ bool IsAsciiWordChar(char ch);
+GTEST_API_ bool IsValidEscape(char ch);
+GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch);
+GTEST_API_ bool ValidateRegex(const char* regex);
+GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str);
+GTEST_API_ bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char ch, char repeat, const char* regex, const char* str);
+GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str);
+
+#endif // GTEST_USES_SIMPLE_RE
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+GTEST_API_ std::string GetLastErrnoDescription();
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter. Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+ // Fail fast if the given string does not begin with a digit;
+ // this bypasses strtoXXX's "optional leading whitespace and plus
+ // or minus sign" semantics, which are undesirable here.
+ if (str.empty() || !IsDigit(str[0])) {
+ return false;
+ }
+ errno = 0;
+
+ char* end;
+ // BiggestConvertible is the largest integer type that system-provided
+ // string-to-number conversion routines can return.
+
+# if GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+ // MSVC and C++ Builder define __int64 instead of the standard long long.
+ typedef unsigned __int64 BiggestConvertible;
+ const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);
+
+# else
+
+ typedef unsigned long long BiggestConvertible; // NOLINT
+ const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);
+
+# endif // GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+ const bool parse_success = *end == '\0' && errno == 0;
+
+ // TODO(vladl@google.com): Convert this to compile time assertion when it is
+ // available.
+ GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
+
+ const Integer result = static_cast<Integer>(parsed);
+ if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
+ *number = result;
+ return true;
+ }
+ return false;
+}
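+
+// Examples (illustrative):
+//
+//   int n = 0;
+//   ParseNaturalNumber(std::string("123"), &n);  // true,  n == 123
+//   ParseNaturalNumber(std::string("-5"),  &n);  // false, n unchanged
+//   ParseNaturalNumber(std::string("99x"), &n);  // false, trailing garbage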
+#endif // GTEST_HAS_DEATH_TEST
+
+// TestResult contains some private methods that should be hidden from
+// Google Test user but are required for testing. This class allow our tests
+// to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+ static void RecordProperty(TestResult* test_result,
+ const std::string& xml_element,
+ const TestProperty& property) {
+ test_result->RecordProperty(xml_element, property);
+ }
+
+ static void ClearTestPartResults(TestResult* test_result) {
+ test_result->ClearTestPartResults();
+ }
+
+ static const std::vector<testing::TestPartResult>& test_part_results(
+ const TestResult& test_result) {
+ return test_result.test_part_results();
+ }
+};
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
+class GTEST_API_ StreamingListener : public EmptyTestEventListener {
+ public:
+ // Abstract base class for writing strings to a socket.
+ class AbstractSocketWriter {
+ public:
+ virtual ~AbstractSocketWriter() {}
+
+ // Sends a string to the socket.
+ virtual void Send(const string& message) = 0;
+
+ // Closes the socket.
+ virtual void CloseConnection() {}
+
+ // Sends a string and a newline to the socket.
+ void SendLn(const string& message) {
+ Send(message + "\n");
+ }
+ };
+
+ // Concrete class for actually writing strings to a socket.
+ class SocketWriter : public AbstractSocketWriter {
+ public:
+ SocketWriter(const string& host, const string& port)
+ : sockfd_(-1), host_name_(host), port_num_(port) {
+ MakeConnection();
+ }
+
+ virtual ~SocketWriter() {
+ if (sockfd_ != -1)
+ CloseConnection();
+ }
+
+ // Sends a string to the socket.
+ virtual void Send(const string& message) {
+ GTEST_CHECK_(sockfd_ != -1)
+ << "Send() can be called only when there is a connection.";
+
+ const int len = static_cast<int>(message.length());
+ if (write(sockfd_, message.c_str(), len) != len) {
+ GTEST_LOG_(WARNING)
+ << "stream_result_to: failed to stream to "
+ << host_name_ << ":" << port_num_;
+ }
+ }
+
+ private:
+ // Creates a client socket and connects to the server.
+ void MakeConnection();
+
+ // Closes the socket.
+ void CloseConnection() {
+ GTEST_CHECK_(sockfd_ != -1)
+ << "CloseConnection() can be called only when there is a connection.";
+
+ close(sockfd_);
+ sockfd_ = -1;
+ }
+
+ int sockfd_; // socket file descriptor
+ const string host_name_;
+ const string port_num_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter);
+ }; // class SocketWriter
+
+ // Escapes '=', '&', '%', and '\n' characters in str as "%xx".
+ static string UrlEncode(const char* str);
+
+ StreamingListener(const string& host, const string& port)
+ : socket_writer_(new SocketWriter(host, port)) { Start(); }
+
+ explicit StreamingListener(AbstractSocketWriter* socket_writer)
+ : socket_writer_(socket_writer) { Start(); }
+
+ void OnTestProgramStart(const UnitTest& /* unit_test */) {
+ SendLn("event=TestProgramStart");
+ }
+
+ void OnTestProgramEnd(const UnitTest& unit_test) {
+ // Note that Google Test currently only reports elapsed time for each
+ // test iteration, not for the entire test program.
+ SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed()));
+
+ // Notify the streaming server to stop.
+ socket_writer_->CloseConnection();
+ }
+
+ void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) {
+ SendLn("event=TestIterationStart&iteration=" +
+ StreamableToString(iteration));
+ }
+
+ void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) {
+ SendLn("event=TestIterationEnd&passed=" +
+ FormatBool(unit_test.Passed()) + "&elapsed_time=" +
+ StreamableToString(unit_test.elapsed_time()) + "ms");
+ }
+
+ void OnTestCaseStart(const TestCase& test_case) {
+ SendLn(std::string("event=TestCaseStart&name=") + test_case.name());
+ }
+
+ void OnTestCaseEnd(const TestCase& test_case) {
+ SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed())
+ + "&elapsed_time=" + StreamableToString(test_case.elapsed_time())
+ + "ms");
+ }
+
+ void OnTestStart(const TestInfo& test_info) {
+ SendLn(std::string("event=TestStart&name=") + test_info.name());
+ }
+
+ void OnTestEnd(const TestInfo& test_info) {
+ SendLn("event=TestEnd&passed=" +
+ FormatBool((test_info.result())->Passed()) +
+ "&elapsed_time=" +
+ StreamableToString((test_info.result())->elapsed_time()) + "ms");
+ }
+
+ void OnTestPartResult(const TestPartResult& test_part_result) {
+ const char* file_name = test_part_result.file_name();
+ if (file_name == NULL)
+ file_name = "";
+ SendLn("event=TestPartResult&file=" + UrlEncode(file_name) +
+ "&line=" + StreamableToString(test_part_result.line_number()) +
+ "&message=" + UrlEncode(test_part_result.message()));
+ }
+
+ private:
+ // Sends the given message and a newline to the socket.
+ void SendLn(const string& message) { socket_writer_->SendLn(message); }
+
+ // Called at the start of streaming to notify the receiver what
+ // protocol we are using.
+ void Start() { SendLn("gtest_streaming_protocol_version=1.0"); }
+
+ string FormatBool(bool value) { return value ? "1" : "0"; }
+
+ const scoped_ptr<AbstractSocketWriter> socket_writer_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener);
+}; // class StreamingListener
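+
+// Sketch (illustrative) of the wire format the listener above produces for
+// one passing test, one URL-encoded event per line; timings are made up:
+//
+//   gtest_streaming_protocol_version=1.0
+//   event=TestProgramStart
+//   event=TestIterationStart&iteration=0
+//   event=TestCaseStart&name=FooTest
+//   event=TestStart&name=Bar
+//   event=TestEnd&passed=1&elapsed_time=0ms
+//   event=TestCaseEnd&passed=1&elapsed_time=0ms
+//   event=TestIterationEnd&passed=1&elapsed_time=0ms
+//   event=TestProgramEnd&passed=1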
+
+#endif // GTEST_CAN_STREAM_RESULTS_
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest-port.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-port.cc
new file mode 100644
index 000000000..e5bf3dd2b
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-port.cc
@@ -0,0 +1,1259 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#include "gtest/internal/gtest-port.h"
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fstream>
+
+#if GTEST_OS_WINDOWS
+# include <windows.h>
+# include <io.h>
+# include <sys/stat.h>
+# include <map> // Used in ThreadLocal.
+#else
+# include <unistd.h>
+#endif // GTEST_OS_WINDOWS
+
+#if GTEST_OS_MAC
+# include <mach/mach_init.h>
+# include <mach/task.h>
+# include <mach/vm_map.h>
+#endif // GTEST_OS_MAC
+
+#if GTEST_OS_QNX
+# include <devctl.h>
+# include <fcntl.h>
+# include <sys/procfs.h>
+#endif // GTEST_OS_QNX
+
+#if GTEST_OS_AIX
+# include <procinfo.h>
+# include <sys/types.h>
+#endif // GTEST_OS_AIX
+
+#include "gtest/gtest-spi.h"
+#include "gtest/gtest-message.h"
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-string.h"
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick exists to
+// prevent the accidental inclusion of gtest-internal-inl.h in the
+// user's code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+namespace internal {
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif // _MSC_VER
+
+#if GTEST_OS_LINUX
+
+namespace {
+template <typename T>
+T ReadProcFileField(const string& filename, int field) {
+ std::string dummy;
+ std::ifstream file(filename.c_str());
+ while (field-- > 0) {
+ file >> dummy;
+ }
+ T output = 0;
+ file >> output;
+ return output;
+}
+} // namespace
+
+// Returns the number of active threads, or 0 when there is an error.
+size_t GetThreadCount() {
+ const string filename =
+ (Message() << "/proc/" << getpid() << "/stat").GetString();
+ return ReadProcFileField<int>(filename, 19);
+}
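+
+// Note (illustrative): /proc/<pid>/stat is typically one space-separated
+// line whose 20th field is num_threads, which is why ReadProcFileField
+// skips 19 fields. E.g. for a process with 8 threads the line contains
+// "... 20 0 8 0 ..." where the "8" is the 20th field.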
+
+#elif GTEST_OS_MAC
+
+size_t GetThreadCount() {
+ const task_t task = mach_task_self();
+ mach_msg_type_number_t thread_count;
+ thread_act_array_t thread_list;
+ const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+ if (status == KERN_SUCCESS) {
+ // task_threads allocates resources in thread_list and we need to free them
+ // to avoid leaks.
+ vm_deallocate(task,
+ reinterpret_cast<vm_address_t>(thread_list),
+ sizeof(thread_t) * thread_count);
+ return static_cast<size_t>(thread_count);
+ } else {
+ return 0;
+ }
+}
+
+#elif GTEST_OS_QNX
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+ const int fd = open("/proc/self/as", O_RDONLY);
+ if (fd < 0) {
+ return 0;
+ }
+ procfs_info process_info;
+ const int status =
+ devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), NULL);
+ close(fd);
+ if (status == EOK) {
+ return static_cast<size_t>(process_info.num_threads);
+ } else {
+ return 0;
+ }
+}
+
+#elif GTEST_OS_AIX
+
+size_t GetThreadCount() {
+ struct procentry64 entry;
+ pid_t pid = getpid();
+ int status = getprocs64(&entry, sizeof(entry), NULL, 0, &pid, 1);
+ if (status == 1) {
+ return entry.pi_thcount;
+ } else {
+ return 0;
+ }
+}
+
+#else
+
+size_t GetThreadCount() {
+ // There's no portable way to detect the number of threads, so we just
+ // return 0 to indicate that we cannot detect it.
+ return 0;
+}
+
+#endif // GTEST_OS_LINUX
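+
+// Illustrative usage sketch (caller code is hypothetical): a return value
+// of 0 means "cannot detect", so callers should treat it as unknown rather
+// than as zero threads.
+//
+//   const size_t n = testing::internal::GetThreadCount();
+//   if (n == 0) {
+//     // Thread counting is unsupported on this platform; skip the check.
+//   }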
+
+#if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
+
+void SleepMilliseconds(int n) {
+ ::Sleep(n);
+}
+
+AutoHandle::AutoHandle()
+ : handle_(INVALID_HANDLE_VALUE) {}
+
+AutoHandle::AutoHandle(Handle handle)
+ : handle_(handle) {}
+
+AutoHandle::~AutoHandle() {
+ Reset();
+}
+
+AutoHandle::Handle AutoHandle::Get() const {
+ return handle_;
+}
+
+void AutoHandle::Reset() {
+ Reset(INVALID_HANDLE_VALUE);
+}
+
+void AutoHandle::Reset(HANDLE handle) {
+ // Resetting with the same handle we already own is invalid.
+ if (handle_ != handle) {
+ if (IsCloseable()) {
+ ::CloseHandle(handle_);
+ }
+ handle_ = handle;
+ } else {
+ GTEST_CHECK_(!IsCloseable())
+ << "Resetting a valid handle to itself is likely a programmer error "
+ "and thus not allowed.";
+ }
+}
+
+bool AutoHandle::IsCloseable() const {
+ // Different Windows APIs may use either of these values to represent an
+ // invalid handle.
+ return handle_ != NULL && handle_ != INVALID_HANDLE_VALUE;
+}
+
+Notification::Notification()
+ : event_(::CreateEvent(NULL, // Default security attributes.
+ TRUE, // Do not reset automatically.
+ FALSE, // Initially unset.
+ NULL)) { // Anonymous event.
+ GTEST_CHECK_(event_.Get() != NULL);
+}
+
+void Notification::Notify() {
+ GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE);
+}
+
+void Notification::WaitForNotification() {
+ GTEST_CHECK_(
+ ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0);
+}
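+
+// Illustrative usage sketch (hypothetical caller code): one thread blocks
+// in WaitForNotification() until another thread calls Notify().
+//
+//   Notification done;
+//   // worker thread:  done.Notify();
+//   // waiter thread:  done.WaitForNotification();  // returns after Notify()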
+
+Mutex::Mutex()
+ : owner_thread_id_(0),
+ type_(kDynamic),
+ critical_section_init_phase_(0),
+ critical_section_(new CRITICAL_SECTION) {
+ ::InitializeCriticalSection(critical_section_);
+}
+
+Mutex::~Mutex() {
+ // Static mutexes are leaked intentionally. It is not thread-safe to try
+ // to clean them up.
+  // TODO(yukawa): Switch to Slim Reader/Writer (SRW) Locks, which require
+  // no cleanup but are available only on Vista and later.
+ // http://msdn.microsoft.com/en-us/library/windows/desktop/aa904937.aspx
+ if (type_ == kDynamic) {
+ ::DeleteCriticalSection(critical_section_);
+ delete critical_section_;
+ critical_section_ = NULL;
+ }
+}
+
+void Mutex::Lock() {
+ ThreadSafeLazyInit();
+ ::EnterCriticalSection(critical_section_);
+ owner_thread_id_ = ::GetCurrentThreadId();
+}
+
+void Mutex::Unlock() {
+ ThreadSafeLazyInit();
+ // We don't protect writing to owner_thread_id_ here, as it's the
+ // caller's responsibility to ensure that the current thread holds the
+ // mutex when this is called.
+ owner_thread_id_ = 0;
+ ::LeaveCriticalSection(critical_section_);
+}
+
+// Does nothing if the current thread holds the mutex. Otherwise, crashes
+// with high probability.
+void Mutex::AssertHeld() {
+ ThreadSafeLazyInit();
+ GTEST_CHECK_(owner_thread_id_ == ::GetCurrentThreadId())
+ << "The current thread is not holding the mutex @" << this;
+}
+
+// Initializes owner_thread_id_ and critical_section_ in static mutexes.
+void Mutex::ThreadSafeLazyInit() {
+ // Dynamic mutexes are initialized in the constructor.
+ if (type_ == kStatic) {
+ switch (
+ ::InterlockedCompareExchange(&critical_section_init_phase_, 1L, 0L)) {
+ case 0:
+ // If critical_section_init_phase_ was 0 before the exchange, we
+ // are the first to test it and need to perform the initialization.
+ owner_thread_id_ = 0;
+ critical_section_ = new CRITICAL_SECTION;
+ ::InitializeCriticalSection(critical_section_);
+ // Updates the critical_section_init_phase_ to 2 to signal
+ // initialization complete.
+ GTEST_CHECK_(::InterlockedCompareExchange(
+ &critical_section_init_phase_, 2L, 1L) ==
+ 1L);
+ break;
+ case 1:
+ // Somebody else is already initializing the mutex; spin until they
+ // are done.
+ while (::InterlockedCompareExchange(&critical_section_init_phase_,
+ 2L,
+ 2L) != 2L) {
+ // Possibly yields the rest of the thread's time slice to other
+ // threads.
+ ::Sleep(0);
+ }
+ break;
+
+ case 2:
+ break; // The mutex is already initialized and ready for use.
+
+ default:
+ GTEST_CHECK_(false)
+ << "Unexpected value of critical_section_init_phase_ "
+ << "while initializing a static mutex.";
+ }
+ }
+}
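+
+// Illustrative usage sketch (hypothetical caller code; MutexLock is the
+// RAII guard declared in gtest-port.h):
+//
+//   static Mutex g_mu(Mutex::kStaticMutex);  // lazily initialized above
+//   {
+//     MutexLock lock(&g_mu);  // Lock() runs ThreadSafeLazyInit() first
+//     // ... critical section ...
+//   }                         // Unlock() on scope exit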
+
+namespace {
+
+class ThreadWithParamSupport : public ThreadWithParamBase {
+ public:
+ static HANDLE CreateThread(Runnable* runnable,
+ Notification* thread_can_start) {
+ ThreadMainParam* param = new ThreadMainParam(runnable, thread_can_start);
+ DWORD thread_id;
+    // TODO(yukawa): Consider using _beginthreadex instead.
+ HANDLE thread_handle = ::CreateThread(
+ NULL, // Default security.
+ 0, // Default stack size.
+ &ThreadWithParamSupport::ThreadMain,
+        param,        // Parameter to ThreadMain.
+ 0x0, // Default creation flags.
+ &thread_id); // Need a valid pointer for the call to work under Win98.
+ GTEST_CHECK_(thread_handle != NULL) << "CreateThread failed with error "
+ << ::GetLastError() << ".";
+ if (thread_handle == NULL) {
+ delete param;
+ }
+ return thread_handle;
+ }
+
+ private:
+ struct ThreadMainParam {
+ ThreadMainParam(Runnable* runnable, Notification* thread_can_start)
+ : runnable_(runnable),
+ thread_can_start_(thread_can_start) {
+ }
+ scoped_ptr<Runnable> runnable_;
+ // Does not own.
+ Notification* thread_can_start_;
+ };
+
+ static DWORD WINAPI ThreadMain(void* ptr) {
+ // Transfers ownership.
+ scoped_ptr<ThreadMainParam> param(static_cast<ThreadMainParam*>(ptr));
+ if (param->thread_can_start_ != NULL)
+ param->thread_can_start_->WaitForNotification();
+ param->runnable_->Run();
+ return 0;
+ }
+
+ // Prohibit instantiation.
+ ThreadWithParamSupport();
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParamSupport);
+};
+
+} // namespace
+
+ThreadWithParamBase::ThreadWithParamBase(Runnable *runnable,
+ Notification* thread_can_start)
+ : thread_(ThreadWithParamSupport::CreateThread(runnable,
+ thread_can_start)) {
+}
+
+ThreadWithParamBase::~ThreadWithParamBase() {
+ Join();
+}
+
+void ThreadWithParamBase::Join() {
+ GTEST_CHECK_(::WaitForSingleObject(thread_.Get(), INFINITE) == WAIT_OBJECT_0)
+ << "Failed to join the thread with error " << ::GetLastError() << ".";
+}
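+
+// Illustrative usage sketch (hypothetical caller code): ThreadWithParam<T>,
+// declared in gtest-port.h, builds on this base.  The destructor joins, so
+// the thread cannot outlive the object.
+//
+//   void Increment(int* p) { ++*p; }
+//   int counter = 0;
+//   {
+//     ThreadWithParam<int*> t(&Increment, &counter, NULL);  // runs now
+//   }  // ~ThreadWithParam() joins here; counter == 1 afterwards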
+
+// Maps a thread to the set of ThreadLocal objects that have values
+// instantiated on that thread and notifies them when the thread exits.  A
+// ThreadLocal instance is expected to persist until all threads it has
+// values on have terminated.
+class ThreadLocalRegistryImpl {
+ public:
+ // Registers thread_local_instance as having value on the current thread.
+ // Returns a value that can be used to identify the thread from other threads.
+ static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
+ const ThreadLocalBase* thread_local_instance) {
+ DWORD current_thread = ::GetCurrentThreadId();
+ MutexLock lock(&mutex_);
+ ThreadIdToThreadLocals* const thread_to_thread_locals =
+ GetThreadLocalsMapLocked();
+ ThreadIdToThreadLocals::iterator thread_local_pos =
+ thread_to_thread_locals->find(current_thread);
+ if (thread_local_pos == thread_to_thread_locals->end()) {
+ thread_local_pos = thread_to_thread_locals->insert(
+ std::make_pair(current_thread, ThreadLocalValues())).first;
+ StartWatcherThreadFor(current_thread);
+ }
+ ThreadLocalValues& thread_local_values = thread_local_pos->second;
+ ThreadLocalValues::iterator value_pos =
+ thread_local_values.find(thread_local_instance);
+ if (value_pos == thread_local_values.end()) {
+ value_pos =
+ thread_local_values
+ .insert(std::make_pair(
+ thread_local_instance,
+ linked_ptr<ThreadLocalValueHolderBase>(
+ thread_local_instance->NewValueForCurrentThread())))
+ .first;
+ }
+ return value_pos->second.get();
+ }
+
+ static void OnThreadLocalDestroyed(
+ const ThreadLocalBase* thread_local_instance) {
+ std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;
+ // Clean up the ThreadLocalValues data structure while holding the lock, but
+ // defer the destruction of the ThreadLocalValueHolderBases.
+ {
+ MutexLock lock(&mutex_);
+ ThreadIdToThreadLocals* const thread_to_thread_locals =
+ GetThreadLocalsMapLocked();
+ for (ThreadIdToThreadLocals::iterator it =
+ thread_to_thread_locals->begin();
+ it != thread_to_thread_locals->end();
+ ++it) {
+ ThreadLocalValues& thread_local_values = it->second;
+ ThreadLocalValues::iterator value_pos =
+ thread_local_values.find(thread_local_instance);
+ if (value_pos != thread_local_values.end()) {
+ value_holders.push_back(value_pos->second);
+ thread_local_values.erase(value_pos);
+          // This 'if' can succeed at most once, so theoretically we could
+          // break out of the loop here, but we don't bother doing so.
+ }
+ }
+ }
+ // Outside the lock, let the destructor for 'value_holders' deallocate the
+ // ThreadLocalValueHolderBases.
+ }
+
+ static void OnThreadExit(DWORD thread_id) {
+ GTEST_CHECK_(thread_id != 0) << ::GetLastError();
+ std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;
+ // Clean up the ThreadIdToThreadLocals data structure while holding the
+ // lock, but defer the destruction of the ThreadLocalValueHolderBases.
+ {
+ MutexLock lock(&mutex_);
+ ThreadIdToThreadLocals* const thread_to_thread_locals =
+ GetThreadLocalsMapLocked();
+ ThreadIdToThreadLocals::iterator thread_local_pos =
+ thread_to_thread_locals->find(thread_id);
+ if (thread_local_pos != thread_to_thread_locals->end()) {
+ ThreadLocalValues& thread_local_values = thread_local_pos->second;
+ for (ThreadLocalValues::iterator value_pos =
+ thread_local_values.begin();
+ value_pos != thread_local_values.end();
+ ++value_pos) {
+ value_holders.push_back(value_pos->second);
+ }
+ thread_to_thread_locals->erase(thread_local_pos);
+ }
+ }
+ // Outside the lock, let the destructor for 'value_holders' deallocate the
+ // ThreadLocalValueHolderBases.
+ }
+
+ private:
+ // In a particular thread, maps a ThreadLocal object to its value.
+ typedef std::map<const ThreadLocalBase*,
+ linked_ptr<ThreadLocalValueHolderBase> > ThreadLocalValues;
+  // Maps a thread's ID to the ThreadLocal values instantiated on that
+  // thread.
+ typedef std::map<DWORD, ThreadLocalValues> ThreadIdToThreadLocals;
+
+ // Holds the thread id and thread handle that we pass from
+ // StartWatcherThreadFor to WatcherThreadFunc.
+ typedef std::pair<DWORD, HANDLE> ThreadIdAndHandle;
+
+ static void StartWatcherThreadFor(DWORD thread_id) {
+ // The returned handle will be kept in thread_map and closed by
+ // watcher_thread in WatcherThreadFunc.
+ HANDLE thread = ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
+ FALSE,
+ thread_id);
+ GTEST_CHECK_(thread != NULL);
+    // We need to pass a valid thread ID pointer into CreateThread for it
+ // to work correctly under Win98.
+ DWORD watcher_thread_id;
+ HANDLE watcher_thread = ::CreateThread(
+ NULL, // Default security.
+ 0, // Default stack size
+ &ThreadLocalRegistryImpl::WatcherThreadFunc,
+ reinterpret_cast<LPVOID>(new ThreadIdAndHandle(thread_id, thread)),
+ CREATE_SUSPENDED,
+ &watcher_thread_id);
+ GTEST_CHECK_(watcher_thread != NULL);
+ // Give the watcher thread the same priority as ours to avoid being
+ // blocked by it.
+ ::SetThreadPriority(watcher_thread,
+ ::GetThreadPriority(::GetCurrentThread()));
+ ::ResumeThread(watcher_thread);
+ ::CloseHandle(watcher_thread);
+ }
+
+  // Monitors exit from a given thread and notifies the registry of the
+  // thread's termination.
+ static DWORD WINAPI WatcherThreadFunc(LPVOID param) {
+ const ThreadIdAndHandle* tah =
+ reinterpret_cast<const ThreadIdAndHandle*>(param);
+ GTEST_CHECK_(
+ ::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0);
+ OnThreadExit(tah->first);
+ ::CloseHandle(tah->second);
+ delete tah;
+ return 0;
+ }
+
+ // Returns map of thread local instances.
+ static ThreadIdToThreadLocals* GetThreadLocalsMapLocked() {
+ mutex_.AssertHeld();
+ static ThreadIdToThreadLocals* map = new ThreadIdToThreadLocals;
+ return map;
+ }
+
+ // Protects access to GetThreadLocalsMapLocked() and its return value.
+ static Mutex mutex_;
+  // Protects access to the thread map.
+ static Mutex thread_map_mutex_;
+};
+
+Mutex ThreadLocalRegistryImpl::mutex_(Mutex::kStaticMutex);
+Mutex ThreadLocalRegistryImpl::thread_map_mutex_(Mutex::kStaticMutex);
+
+ThreadLocalValueHolderBase* ThreadLocalRegistry::GetValueOnCurrentThread(
+ const ThreadLocalBase* thread_local_instance) {
+ return ThreadLocalRegistryImpl::GetValueOnCurrentThread(
+ thread_local_instance);
+}
+
+void ThreadLocalRegistry::OnThreadLocalDestroyed(
+ const ThreadLocalBase* thread_local_instance) {
+ ThreadLocalRegistryImpl::OnThreadLocalDestroyed(thread_local_instance);
+}
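+
+// Illustrative usage sketch (hypothetical caller code): this registry is
+// what gives ThreadLocal<T>, declared in gtest-port.h, its per-thread
+// storage on Windows.
+//
+//   static ThreadLocal<int> tls(0);  // one holder per thread, per above
+//   tls.set(42);                     // affects only the calling thread
+//   int v = tls.get();               // 42 here; 0 on any other thread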
+
+#endif // GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
+
+#if GTEST_USES_POSIX_RE
+
+// Implements RE. Currently only needed for death tests.
+
+RE::~RE() {
+ if (is_valid_) {
+    // regfree'ing an invalid regex might crash because the content
+    // of the regex is undefined.  Since the two regexes are compiled
+    // from essentially the same pattern, one cannot be valid (or
+    // invalid) without the other being so too.
+ regfree(&partial_regex_);
+ regfree(&full_regex_);
+ }
+ free(const_cast<char*>(pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ if (!re.is_valid_) return false;
+
+ regmatch_t match;
+ return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ if (!re.is_valid_) return false;
+
+ regmatch_t match;
+ return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = posix::StrDup(regex);
+
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match.
+ const size_t full_regex_len = strlen(regex) + 10;
+ char* const full_pattern = new char[full_regex_len];
+
+ snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
+ is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
+  // We compile partial_regex_ only when the full pattern compiled
+  // successfully; the destructor likewise frees both regexes only when
+  // is_valid_ is true, so a never-initialized partial_regex_ is never
+  // freed.
+  //
+  // Some implementations of POSIX regex (e.g. on at least some
+  // versions of Cygwin) don't accept the empty string as a valid
+  // regex.  We change it to an equivalent form "()" to be safe.
+ if (is_valid_) {
+ const char* const partial_regex = (*regex == '\0') ? "()" : regex;
+ is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
+ }
+ EXPECT_TRUE(is_valid_)
+ << "Regular expression \"" << regex
+ << "\" is not a valid POSIX Extended regular expression.";
+
+ delete[] full_pattern;
+}
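+
+// Worked example (illustrative): for the regex "a+", Init() compiles
+// full_regex_ from "^(a+)$" and partial_regex_ from "a+", so
+// FullMatch("aa", re) and PartialMatch("xaay", re) are both true while
+// FullMatch("xaay", re) is false.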
+
+#elif GTEST_USES_SIMPLE_RE
+
+// Returns true iff ch appears anywhere in str (excluding the
+// terminating '\0' character).
+bool IsInSet(char ch, const char* str) {
+ return ch != '\0' && strchr(str, ch) != NULL;
+}
+
+// Returns true iff ch belongs to the given classification. Unlike
+// similar functions in <ctype.h>, these aren't affected by the
+// current locale.
+bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
+bool IsAsciiPunct(char ch) {
+ return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
+}
+bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
+bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
+bool IsAsciiWordChar(char ch) {
+ return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+ ('0' <= ch && ch <= '9') || ch == '_';
+}
+
+// Returns true iff "\\c" is a supported escape sequence.
+bool IsValidEscape(char c) {
+ return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW"));
+}
+
+// Returns true iff the given atom (specified by escaped and pattern_char)
+// matches ch. The result is undefined if the atom is invalid.
+bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
+ if (escaped) { // "\\p" where p is pattern_char.
+ switch (pattern_char) {
+ case 'd': return IsAsciiDigit(ch);
+ case 'D': return !IsAsciiDigit(ch);
+ case 'f': return ch == '\f';
+ case 'n': return ch == '\n';
+ case 'r': return ch == '\r';
+ case 's': return IsAsciiWhiteSpace(ch);
+ case 'S': return !IsAsciiWhiteSpace(ch);
+ case 't': return ch == '\t';
+ case 'v': return ch == '\v';
+ case 'w': return IsAsciiWordChar(ch);
+ case 'W': return !IsAsciiWordChar(ch);
+ }
+ return IsAsciiPunct(pattern_char) && pattern_char == ch;
+ }
+
+ return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
+}
+
+// Helper function used by ValidateRegex() to format error messages.
+std::string FormatRegexSyntaxError(const char* regex, int index) {
+ return (Message() << "Syntax error at index " << index
+ << " in simple regular expression \"" << regex << "\": ").GetString();
+}
+
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+ if (regex == NULL) {
+ // TODO(wan@google.com): fix the source file location in the
+ // assertion failures to match where the regex is used in user
+ // code.
+ ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+ return false;
+ }
+
+ bool is_valid = true;
+
+ // True iff ?, *, or + can follow the previous atom.
+ bool prev_repeatable = false;
+ for (int i = 0; regex[i]; i++) {
+ if (regex[i] == '\\') { // An escape sequence
+ i++;
+ if (regex[i] == '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "'\\' cannot appear at the end.";
+ return false;
+ }
+
+ if (!IsValidEscape(regex[i])) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+ << "invalid escape sequence \"\\" << regex[i] << "\".";
+ is_valid = false;
+ }
+ prev_repeatable = true;
+ } else { // Not an escape sequence.
+ const char ch = regex[i];
+
+ if (ch == '^' && i > 0) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'^' can only appear at the beginning.";
+ is_valid = false;
+ } else if (ch == '$' && regex[i + 1] != '\0') {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'$' can only appear at the end.";
+ is_valid = false;
+ } else if (IsInSet(ch, "()[]{}|")) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' is unsupported.";
+ is_valid = false;
+ } else if (IsRepeat(ch) && !prev_repeatable) {
+ ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+ << "'" << ch << "' can only follow a repeatable token.";
+ is_valid = false;
+ }
+
+ prev_repeatable = !IsInSet(ch, "^$?*+");
+ }
+ }
+
+ return is_valid;
+}
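+
+// Examples of the rules above (illustrative): "ab*c\d" is valid; "*a" is
+// not ('*' must follow a repeatable token); "a^b" is not ('^' may appear
+// only at the beginning); "(ab)" is not (grouping is unsupported in this
+// simple dialect).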
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression. The regex atom is defined as c if escaped is false,
+// or \c otherwise. repeat is the repetition meta character (?, *,
+// or +). The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway. We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+ bool escaped, char c, char repeat, const char* regex,
+ const char* str) {
+ const size_t min_count = (repeat == '+') ? 1 : 0;
+ const size_t max_count = (repeat == '?') ? 1 :
+ static_cast<size_t>(-1) - 1;
+ // We cannot call numeric_limits::max() as it conflicts with the
+ // max() macro on Windows.
+
+ for (size_t i = 0; i <= max_count; ++i) {
+ // We know that the atom matches each of the first i characters in str.
+ if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+ // We have enough matches at the head, and the tail matches too.
+ // Since we only care about *whether* the pattern matches str
+ // (as opposed to *how* it matches), there is no need to find a
+ // greedy match.
+ return true;
+ }
+ if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
+ return false;
+ }
+ return false;
+}
+
+// Returns true iff regex matches a prefix of str. regex must be a
+// valid simple regular expression and not start with "^", or the
+// result is undefined.
+bool MatchRegexAtHead(const char* regex, const char* str) {
+ if (*regex == '\0') // An empty regex matches a prefix of anything.
+ return true;
+
+ // "$" only matches the end of a string. Note that regex being
+ // valid guarantees that there's nothing after "$" in it.
+ if (*regex == '$')
+ return *str == '\0';
+
+ // Is the first thing in regex an escape sequence?
+ const bool escaped = *regex == '\\';
+ if (escaped)
+ ++regex;
+ if (IsRepeat(regex[1])) {
+ // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
+ // here's an indirect recursion. It terminates as the regex gets
+ // shorter in each recursion.
+ return MatchRepetitionAndRegexAtHead(
+ escaped, regex[0], regex[1], regex + 2, str);
+ } else {
+ // regex isn't empty, isn't "$", and doesn't start with a
+ // repetition. We match the first atom of regex with the first
+ // character of str and recurse.
+ return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
+ MatchRegexAtHead(regex + 1, str + 1);
+ }
+}
+
+// Returns true iff regex matches any substring of str. regex must be
+// a valid simple regular expression, or the result is undefined.
+//
+// The algorithm is recursive, but the recursion depth doesn't exceed
+// the regex length, so we won't need to worry about running out of
+// stack space normally. In rare cases the time complexity can be
+// exponential with respect to the regex length + the string length,
+// but usually it's much faster (often close to linear).
+bool MatchRegexAnywhere(const char* regex, const char* str) {
+ if (regex == NULL || str == NULL)
+ return false;
+
+ if (*regex == '^')
+ return MatchRegexAtHead(regex + 1, str);
+
+ // A successful match can be anywhere in str.
+ do {
+ if (MatchRegexAtHead(regex, str))
+ return true;
+ } while (*str++ != '\0');
+ return false;
+}
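+
+// Worked example (illustrative): MatchRegexAnywhere("a?b$", "cab") tries
+// MatchRegexAtHead() at each suffix of "cab".  At suffix "ab" the optional
+// 'a' matches once, 'b' matches, and '$' sees the terminating '\0', so the
+// whole call returns true.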
+
+// Implements the RE class.
+
+RE::~RE() {
+ free(const_cast<char*>(pattern_));
+ free(const_cast<char*>(full_pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+ return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+ pattern_ = full_pattern_ = NULL;
+ if (regex != NULL) {
+ pattern_ = posix::StrDup(regex);
+ }
+
+ is_valid_ = ValidateRegex(regex);
+ if (!is_valid_) {
+ // No need to calculate the full pattern when the regex is invalid.
+ return;
+ }
+
+ const size_t len = strlen(regex);
+ // Reserves enough bytes to hold the regular expression used for a
+ // full match: we need space to prepend a '^', append a '$', and
+ // terminate the string with '\0'.
+ char* buffer = static_cast<char*>(malloc(len + 3));
+ full_pattern_ = buffer;
+
+ if (*regex != '^')
+ *buffer++ = '^'; // Makes sure full_pattern_ starts with '^'.
+
+ // We don't use snprintf or strncpy, as they trigger a warning when
+ // compiled with VC++ 8.0.
+ memcpy(buffer, regex, len);
+ buffer += len;
+
+ if (len == 0 || regex[len - 1] != '$')
+ *buffer++ = '$'; // Makes sure full_pattern_ ends with '$'.
+
+ *buffer = '\0';
+}
+
+#endif // GTEST_USES_POSIX_RE
+
+const char kUnknownFile[] = "unknown file";
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) {
+ const std::string file_name(file == NULL ? kUnknownFile : file);
+
+ if (line < 0) {
+ return file_name + ":";
+ }
+#ifdef _MSC_VER
+ return file_name + "(" + StreamableToString(line) + "):";
+#else
+ return file_name + ":" + StreamableToString(line) + ":";
+#endif // _MSC_VER
+}
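+
+// Examples (illustrative): FormatFileLocation("foo.cc", 42) yields
+// "foo.cc(42):" under MSVC and "foo.cc:42:" elsewhere, matching each
+// compiler's own diagnostics; FormatFileLocation(NULL, -1) yields
+// "unknown file:".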
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+// Note that FormatCompilerIndependentFileLocation() does NOT append a
+// colon to the file location it produces, unlike FormatFileLocation().
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
+ const char* file, int line) {
+ const std::string file_name(file == NULL ? kUnknownFile : file);
+
+ if (line < 0)
+ return file_name;
+ else
+ return file_name + ":" + StreamableToString(line);
+}
+
+GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
+ : severity_(severity) {
+ const char* const marker =
+ severity == GTEST_INFO ? "[ INFO ]" :
+ severity == GTEST_WARNING ? "[WARNING]" :
+ severity == GTEST_ERROR ? "[ ERROR ]" : "[ FATAL ]";
+ GetStream() << ::std::endl << marker << " "
+ << FormatFileLocation(file, line).c_str() << ": ";
+}
+
+// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+GTestLog::~GTestLog() {
+ GetStream() << ::std::endl;
+ if (severity_ == GTEST_FATAL) {
+ fflush(stderr);
+ posix::Abort();
+ }
+}
+
+// Disable Microsoft deprecation warnings for POSIX functions called from
+// this class (creat, dup, dup2, and close).
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Object that captures an output stream (stdout/stderr).
+class CapturedStream {
+ public:
+ // The ctor redirects the stream to a temporary file.
+ explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
+# if GTEST_OS_WINDOWS
+ char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+ char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT
+
+ ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
+ const UINT success = ::GetTempFileNameA(temp_dir_path,
+ "gtest_redir",
+ 0, // Generate unique file name.
+ temp_file_path);
+ GTEST_CHECK_(success != 0)
+ << "Unable to create a temporary file in " << temp_dir_path;
+ const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
+ GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
+ << temp_file_path;
+ filename_ = temp_file_path;
+# else
+    // There's no guarantee that a test has write access to the current
+    // directory, so we create the temporary file in a well-known temporary
+    // directory instead: /tmp on most systems, and /sdcard on Android,
+    // which has no /tmp.
+# if GTEST_OS_LINUX_ANDROID
+ // Note: Android applications are expected to call the framework's
+ // Context.getExternalStorageDirectory() method through JNI to get
+ // the location of the world-writable SD Card directory. However,
+ // this requires a Context handle, which cannot be retrieved
+ // globally from native code. Doing so also precludes running the
+ // code as part of a regular standalone executable, which doesn't
+ // run in a Dalvik process (e.g. when running it through 'adb shell').
+ //
+ // The location /sdcard is directly accessible from native code
+ // and is the only location (unofficially) supported by the Android
+ // team. It's generally a symlink to the real SD Card mount point
+ // which can be /mnt/sdcard, /mnt/sdcard0, /system/media/sdcard, or
+ // other OEM-customized locations. Never rely on these, and always
+ // use /sdcard.
+ char name_template[] = "/sdcard/gtest_captured_stream.XXXXXX";
+# else
+ char name_template[] = "/tmp/captured_stream.XXXXXX";
+# endif // GTEST_OS_LINUX_ANDROID
+ const int captured_fd = mkstemp(name_template);
+ filename_ = name_template;
+# endif // GTEST_OS_WINDOWS
+ fflush(NULL);
+ dup2(captured_fd, fd_);
+ close(captured_fd);
+ }
+
+ ~CapturedStream() {
+ remove(filename_.c_str());
+ }
+
+ std::string GetCapturedString() {
+ if (uncaptured_fd_ != -1) {
+ // Restores the original stream.
+ fflush(NULL);
+ dup2(uncaptured_fd_, fd_);
+ close(uncaptured_fd_);
+ uncaptured_fd_ = -1;
+ }
+
+ FILE* const file = posix::FOpen(filename_.c_str(), "r");
+ const std::string content = ReadEntireFile(file);
+ posix::FClose(file);
+ return content;
+ }
+
+ private:
+ const int fd_; // A stream to capture.
+ int uncaptured_fd_;
+  // Name of the temporary file holding the captured output.
+ ::std::string filename_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream);
+};
+
+GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+static CapturedStream* g_captured_stderr = NULL;
+static CapturedStream* g_captured_stdout = NULL;
+
+// Starts capturing an output stream (stdout/stderr).
+void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) {
+ if (*stream != NULL) {
+ GTEST_LOG_(FATAL) << "Only one " << stream_name
+ << " capturer can exist at a time.";
+ }
+ *stream = new CapturedStream(fd);
+}
+
+// Stops capturing the output stream and returns the captured string.
+std::string GetCapturedStream(CapturedStream** captured_stream) {
+ const std::string content = (*captured_stream)->GetCapturedString();
+
+ delete *captured_stream;
+ *captured_stream = NULL;
+
+ return content;
+}
+
+// Starts capturing stdout.
+void CaptureStdout() {
+ CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout);
+}
+
+// Starts capturing stderr.
+void CaptureStderr() {
+ CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr);
+}
+
+// Stops capturing stdout and returns the captured string.
+std::string GetCapturedStdout() {
+ return GetCapturedStream(&g_captured_stdout);
+}
+
+// Stops capturing stderr and returns the captured string.
+std::string GetCapturedStderr() {
+ return GetCapturedStream(&g_captured_stderr);
+}
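+
+// Illustrative usage sketch (mirrors the public testing::CaptureStdout()
+// API; the printed text is hypothetical):
+//
+//   CaptureStdout();
+//   printf("hello\n");
+//   const std::string out = GetCapturedStdout();  // out == "hello\n"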
+
+#endif // GTEST_HAS_STREAM_REDIRECTION
+
+std::string TempDir() {
+#if GTEST_OS_WINDOWS_MOBILE
+ return "\\temp\\";
+#elif GTEST_OS_WINDOWS
+ const char* temp_dir = posix::GetEnv("TEMP");
+ if (temp_dir == NULL || temp_dir[0] == '\0')
+ return "\\temp\\";
+ else if (temp_dir[strlen(temp_dir) - 1] == '\\')
+ return temp_dir;
+ else
+ return std::string(temp_dir) + "\\";
+#elif GTEST_OS_LINUX_ANDROID
+ return "/sdcard/";
+#else
+ return "/tmp/";
+#endif // GTEST_OS_WINDOWS_MOBILE
+}
+
+size_t GetFileSize(FILE* file) {
+ fseek(file, 0, SEEK_END);
+ return static_cast<size_t>(ftell(file));
+}
+
+std::string ReadEntireFile(FILE* file) {
+ const size_t file_size = GetFileSize(file);
+ char* const buffer = new char[file_size];
+
+ size_t bytes_last_read = 0; // # of bytes read in the last fread()
+ size_t bytes_read = 0; // # of bytes read so far
+
+ fseek(file, 0, SEEK_SET);
+
+ // Keeps reading the file until we cannot read further or the
+ // pre-determined file size is reached.
+ do {
+ bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file);
+ bytes_read += bytes_last_read;
+ } while (bytes_last_read > 0 && bytes_read < file_size);
+
+ const std::string content(buffer, bytes_read);
+ delete[] buffer;
+
+ return content;
+}
+
+#if GTEST_HAS_DEATH_TEST
+
+static const ::std::vector<testing::internal::string>* g_injected_test_argvs =
+ NULL; // Owned.
+
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>* argvs) {
+ if (g_injected_test_argvs != argvs)
+ delete g_injected_test_argvs;
+ g_injected_test_argvs = argvs;
+}
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs() {
+ if (g_injected_test_argvs != NULL) {
+ return *g_injected_test_argvs;
+ }
+ return GetArgvs();
+}
+#endif // GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
+ DebugBreak();
+ TerminateProcess(GetCurrentProcess(), 1);
+}
+} // namespace posix
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("foo") will return
+// "GTEST_FOO" in the open-source version.
+static std::string FlagToEnvVar(const char* flag) {
+ const std::string full_flag =
+ (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
+
+ Message env_var;
+ for (size_t i = 0; i != full_flag.length(); i++) {
+ env_var << ToUpper(full_flag.c_str()[i]);
+ }
+
+ return env_var.GetString();
+}
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
+ // Parses the environment variable as a decimal integer.
+ char* end = NULL;
+ const long long_value = strtol(str, &end, 10); // NOLINT
+
+ // Has strtol() consumed all characters in the string?
+ if (*end != '\0') {
+ // No - an invalid character was encountered.
+ Message msg;
+ msg << "WARNING: " << src_text
+ << " is expected to be a 32-bit integer, but actually"
+ << " has value \"" << str << "\".\n";
+ printf("%s", msg.GetString().c_str());
+ fflush(stdout);
+ return false;
+ }
+
+ // Is the parsed value in the range of an Int32?
+ const Int32 result = static_cast<Int32>(long_value);
+ if (long_value == LONG_MAX || long_value == LONG_MIN ||
+ // The parsed value overflows as a long. (strtol() returns
+ // LONG_MAX or LONG_MIN when the input overflows.)
+ result != long_value
+ // The parsed value overflows as an Int32.
+ ) {
+ Message msg;
+ msg << "WARNING: " << src_text
+ << " is expected to be a 32-bit integer, but actually"
+ << " has value " << str << ", which overflows.\n";
+ printf("%s", msg.GetString().c_str());
+ fflush(stdout);
+ return false;
+ }
+
+ *value = result;
+ return true;
+}
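+
+// Examples (illustrative): ParseInt32(msg, "123", &v) sets v to 123 and
+// returns true; "12x" (trailing garbage) and "9999999999" (overflows an
+// Int32) both print a warning and return false, leaving v unchanged.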
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromGTestEnv(const char* flag, bool default_value) {
+#if defined(GTEST_GET_BOOL_FROM_ENV_)
+ return GTEST_GET_BOOL_FROM_ENV_(flag, default_value);
+#endif // defined(GTEST_GET_BOOL_FROM_ENV_)
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ return string_value == NULL ?
+ default_value : strcmp(string_value, "0") != 0;
+}
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
+#if defined(GTEST_GET_INT32_FROM_ENV_)
+ return GTEST_GET_INT32_FROM_ENV_(flag, default_value);
+#endif // defined(GTEST_GET_INT32_FROM_ENV_)
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* const string_value = posix::GetEnv(env_var.c_str());
+ if (string_value == NULL) {
+ // The environment variable is not set.
+ return default_value;
+ }
+
+ Int32 result = default_value;
+ if (!ParseInt32(Message() << "Environment variable " << env_var,
+ string_value, &result)) {
+ printf("The default value %s is used.\n",
+ (Message() << default_value).GetString().c_str());
+ fflush(stdout);
+ return default_value;
+ }
+
+ return result;
+}
+
+// Reads and returns the string environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+std::string StringFromGTestEnv(const char* flag, const char* default_value) {
+#if defined(GTEST_GET_STRING_FROM_ENV_)
+ return GTEST_GET_STRING_FROM_ENV_(flag, default_value);
+#endif // defined(GTEST_GET_STRING_FROM_ENV_)
+ const std::string env_var = FlagToEnvVar(flag);
+ const char* value = posix::GetEnv(env_var.c_str());
+ if (value != NULL) {
+ return value;
+ }
+
+ // As a special case for the 'output' flag, if GTEST_OUTPUT is not
+ // set, we look for XML_OUTPUT_FILE, which is set by the Bazel build
+ // system. The value of XML_OUTPUT_FILE is a filename without the
+ // "xml:" prefix of GTEST_OUTPUT.
+ //
+ // The net priority order after flag processing is thus:
+ // --gtest_output command line flag
+ // GTEST_OUTPUT environment variable
+ // XML_OUTPUT_FILE environment variable
+ // 'default_value'
+ if (strcmp(flag, "output") == 0) {
+ value = posix::GetEnv("XML_OUTPUT_FILE");
+ if (value != NULL) {
+ return std::string("xml:") + value;
+ }
+ }
+ return default_value;
+}
+
+} // namespace internal
+} // namespace testing
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest-printers.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-printers.cc
new file mode 100644
index 000000000..a2df412f8
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-printers.cc
@@ -0,0 +1,373 @@
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+// void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// It uses the << operator when possible, and prints the bytes in the
+// object otherwise. A user can override its behavior for a class
+// type Foo by defining either operator<<(::std::ostream&, const Foo&)
+// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that
+// defines Foo.
+
+#include "gtest/gtest-printers.h"
+#include <ctype.h>
+#include <stdio.h>
+#include <cwchar>
+#include <ostream> // NOLINT
+#include <string>
+#include "gtest/internal/gtest-port.h"
+
+namespace testing {
+
+namespace {
+
+using ::std::ostream;
+
+// Prints a segment of bytes in the given object.
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start,
+ size_t count, ostream* os) {
+ char text[5] = "";
+ for (size_t i = 0; i != count; i++) {
+ const size_t j = start + i;
+ if (i != 0) {
+ // Organizes the bytes into groups of 2 for easy parsing by
+      // humans.
+ if ((j % 2) == 0)
+ *os << ' ';
+ else
+ *os << '-';
+ }
+ GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]);
+ *os << text;
+ }
+}
+
+// Prints the bytes in the given value to the given ostream.
+void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,
+ ostream* os) {
+ // Tells the user how big the object is.
+ *os << count << "-byte object <";
+
+ const size_t kThreshold = 132;
+ const size_t kChunkSize = 64;
+ // If the object size is bigger than kThreshold, we'll have to omit
+ // some details by printing only the first and the last kChunkSize
+ // bytes.
+ // TODO(wan): let the user control the threshold using a flag.
+ if (count < kThreshold) {
+ PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);
+ } else {
+ PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);
+ *os << " ... ";
+ // Rounds up to 2-byte boundary.
+ const size_t resume_pos = (count - kChunkSize + 1)/2*2;
+ PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);
+ }
+ *os << ">";
+}
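+
+// Example output (illustrative): a 4-byte object holding the bytes
+// 0xDE 0xAD 0xBE 0xEF prints as "4-byte object <DE-AD BE-EF>"; the
+// '-'/' ' separators come from the 2-byte grouping above.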
+
+} // namespace
+
+namespace internal2 {
+
+// Delegates to PrintBytesInObjectToImpl() to print the bytes in the
+// given object. The delegation simplifies the implementation, which
+// uses the << operator and thus is easier done outside of the
+// ::testing::internal namespace, which contains a << operator that
+// sometimes conflicts with the one in STL.
+void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
+ ostream* os) {
+ PrintBytesInObjectToImpl(obj_bytes, count, os);
+}
+
+} // namespace internal2
+
+namespace internal {
+
+// Depending on the value of a char (or wchar_t), we print it in one
+// of three formats:
+// - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
+//   - as a hexadecimal escape sequence (e.g. '\x7F'), or
+// - as a special escape sequence (e.g. '\r', '\n').
+enum CharFormat {
+ kAsIs,
+ kHexEscape,
+ kSpecialEscape
+};
+
+// Returns true if c is a printable ASCII character. We test the
+// value of c directly instead of calling isprint(), which is buggy on
+// Windows Mobile.
+inline bool IsPrintableAscii(wchar_t c) {
+ return 0x20 <= c && c <= 0x7E;
+}
+
+// Prints a wide or narrow char c as a character literal without the
+// quotes, escaping it when necessary; returns how c was formatted.
+// The template argument UnsignedChar is the unsigned version of Char,
+// which is the type of c.
+template <typename UnsignedChar, typename Char>
+static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
+ switch (static_cast<wchar_t>(c)) {
+ case L'\0':
+ *os << "\\0";
+ break;
+ case L'\'':
+ *os << "\\'";
+ break;
+ case L'\\':
+ *os << "\\\\";
+ break;
+ case L'\a':
+ *os << "\\a";
+ break;
+ case L'\b':
+ *os << "\\b";
+ break;
+ case L'\f':
+ *os << "\\f";
+ break;
+ case L'\n':
+ *os << "\\n";
+ break;
+ case L'\r':
+ *os << "\\r";
+ break;
+ case L'\t':
+ *os << "\\t";
+ break;
+ case L'\v':
+ *os << "\\v";
+ break;
+ default:
+ if (IsPrintableAscii(c)) {
+ *os << static_cast<char>(c);
+ return kAsIs;
+ } else {
+ *os << "\\x" + String::FormatHexInt(static_cast<UnsignedChar>(c));
+ return kHexEscape;
+ }
+ }
+ return kSpecialEscape;
+}
+
+// Prints a wchar_t c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) {
+ switch (c) {
+ case L'\'':
+ *os << "'";
+ return kAsIs;
+ case L'"':
+ *os << "\\\"";
+ return kSpecialEscape;
+ default:
+ return PrintAsCharLiteralTo<wchar_t>(c, os);
+ }
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(char c, ostream* os) {
+ return PrintAsStringLiteralTo(
+ static_cast<wchar_t>(static_cast<unsigned char>(c)), os);
+}
+
+// Prints a wide or narrow character c and its code. '\0' is printed
+// as "'\\0'", other unprintable characters are also properly escaped
+// using the standard C++ escape sequences.
+// UnsignedChar is the unsigned version of Char, which is the type of c.
+template <typename UnsignedChar, typename Char>
+void PrintCharAndCodeTo(Char c, ostream* os) {
+ // First, print c as a literal in the most readable form we can find.
+ *os << ((sizeof(c) > 1) ? "L'" : "'");
+ const CharFormat format = PrintAsCharLiteralTo<UnsignedChar>(c, os);
+ *os << "'";
+
+ // To aid user debugging, we also print c's code in decimal, unless
+ // it's 0 (in which case c was printed as '\\0', making the code
+ // obvious).
+ if (c == 0)
+ return;
+ *os << " (" << static_cast<int>(c);
+
+  // For convenience, we also print c's code in hexadecimal,
+ // unless c was already printed in the form '\x##' or the code is in
+ // [1, 9].
+ if (format == kHexEscape || (1 <= c && c <= 9)) {
+ // Do nothing.
+ } else {
+ *os << ", 0x" << String::FormatHexInt(static_cast<UnsignedChar>(c));
+ }
+ *os << ")";
+}
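+
+// Examples (illustrative): 'a' prints as "'a' (97, 0x61)"; a newline
+// prints as "'\n' (10, 0xA)"; '\0' prints as just "'\0'" since its code
+// is obvious.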
+
+void PrintTo(unsigned char c, ::std::ostream* os) {
+ PrintCharAndCodeTo<unsigned char>(c, os);
+}
+void PrintTo(signed char c, ::std::ostream* os) {
+ PrintCharAndCodeTo<unsigned char>(c, os);
+}
+
+// Prints a wchar_t as a symbol if it is printable or as its internal
+// code otherwise and also as its code. L'\0' is printed as "L'\\0'".
+void PrintTo(wchar_t wc, ostream* os) {
+ PrintCharAndCodeTo<wchar_t>(wc, os);
+}
+
+// Prints the given array of characters to the ostream. CharType must be either
+// char or wchar_t.
+// The array starts at begin, has len elements, may include '\0' characters,
+// and may not be NUL-terminated.
+template <typename CharType>
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+static void PrintCharsAsStringTo(
+ const CharType* begin, size_t len, ostream* os) {
+ const char* const kQuoteBegin = sizeof(CharType) == 1 ? "\"" : "L\"";
+ *os << kQuoteBegin;
+ bool is_previous_hex = false;
+ for (size_t index = 0; index < len; ++index) {
+ const CharType cur = begin[index];
+ if (is_previous_hex && IsXDigit(cur)) {
+      // The previous character was printed in '\x..' form, and this
+      // character could be read as another hexadecimal digit of that
+      // number.  Break the string to disambiguate.
+ *os << "\" " << kQuoteBegin;
+ }
+ is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape;
+ }
+ *os << "\"";
+}
+
+// Prints a (const) char/wchar_t array of 'len' elements, starting at address
+// 'begin'. CharType must be either char or wchar_t.
+template <typename CharType>
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+static void UniversalPrintCharArray(
+ const CharType* begin, size_t len, ostream* os) {
+ // The code
+ // const char kFoo[] = "foo";
+ // generates an array of 4, not 3, elements, with the last one being '\0'.
+ //
+ // Therefore when printing a char array, we don't print the last element if
+ // it's '\0', such that the output matches the string literal as it's
+ // written in the source code.
+ if (len > 0 && begin[len - 1] == '\0') {
+ PrintCharsAsStringTo(begin, len - 1, os);
+ return;
+ }
+
+ // If, however, the last element in the array is not '\0', e.g.
+ // const char kFoo[] = { 'f', 'o', 'o' };
+ // we must print the entire array. We also print a message to indicate
+ // that the array is not NUL-terminated.
+ PrintCharsAsStringTo(begin, len, os);
+ *os << " (no terminating NUL)";
+}
+
+// Prints a (const) char array of 'len' elements, starting at address 'begin'.
+void UniversalPrintArray(const char* begin, size_t len, ostream* os) {
+ UniversalPrintCharArray(begin, len, os);
+}
+
+// Prints a (const) wchar_t array of 'len' elements, starting at address
+// 'begin'.
+void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) {
+ UniversalPrintCharArray(begin, len, os);
+}
+
+// Prints the given C string to the ostream.
+void PrintTo(const char* s, ostream* os) {
+ if (s == NULL) {
+ *os << "NULL";
+ } else {
+ *os << ImplicitCast_<const void*>(s) << " pointing to ";
+ PrintCharsAsStringTo(s, strlen(s), os);
+ }
+}
+
+// The MSVC compiler can be configured to define wchar_t as a typedef
+// of unsigned short.  Defining an overload for const wchar_t* in that case
+// would cause pointers to unsigned shorts to be printed as wide strings,
+// possibly accessing more memory than intended and causing invalid
+// memory accesses.  MSVC defines the _NATIVE_WCHAR_T_DEFINED symbol when
+// wchar_t is implemented as a native type.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Prints the given wide C string to the ostream.
+void PrintTo(const wchar_t* s, ostream* os) {
+ if (s == NULL) {
+ *os << "NULL";
+ } else {
+ *os << ImplicitCast_<const void*>(s) << " pointing to ";
+ PrintCharsAsStringTo(s, std::wcslen(s), os);
+ }
+}
+#endif // wchar_t is native
+
+// Prints a ::string object.
+#if GTEST_HAS_GLOBAL_STRING
+void PrintStringTo(const ::string& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif // GTEST_HAS_GLOBAL_STRING
+
+void PrintStringTo(const ::std::string& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+
+// Prints a ::wstring object.
+#if GTEST_HAS_GLOBAL_WSTRING
+void PrintWideStringTo(const ::wstring& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+void PrintWideStringTo(const ::std::wstring& s, ostream* os) {
+ PrintCharsAsStringTo(s.data(), s.size(), os);
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+} // namespace internal
+
+} // namespace testing
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest-test-part.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-test-part.cc
new file mode 100644
index 000000000..fb0e35425
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-test-part.cc
@@ -0,0 +1,110 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// The Google C++ Testing Framework (Google Test)
+
+#include "gtest/gtest-test-part.h"
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick exists to
+// prevent the accidental inclusion of gtest-internal-inl.h in the
+// user's code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+using internal::GetUnitTestImpl;
+
+// Gets the summary of the failure message by stripping the stack
+// trace from it.
+std::string TestPartResult::ExtractSummary(const char* message) {
+ const char* const stack_trace = strstr(message, internal::kStackTraceMarker);
+ return stack_trace == NULL ? message :
+ std::string(message, stack_trace);
+}
+
+// Prints a TestPartResult object.
+std::ostream& operator<<(std::ostream& os, const TestPartResult& result) {
+ return os
+ << result.file_name() << ":" << result.line_number() << ": "
+ << (result.type() == TestPartResult::kSuccess ? "Success" :
+ result.type() == TestPartResult::kFatalFailure ? "Fatal failure" :
+ "Non-fatal failure") << ":\n"
+ << result.message() << std::endl;
+}
+
+// Appends a TestPartResult to the array.
+void TestPartResultArray::Append(const TestPartResult& result) {
+ array_.push_back(result);
+}
+
+// Returns the TestPartResult at the given index (0-based).
+const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const {
+ if (index < 0 || index >= size()) {
+ printf("\nInvalid index (%d) into TestPartResultArray.\n", index);
+ internal::posix::Abort();
+ }
+
+ return array_[index];
+}
+
+// Returns the number of TestPartResult objects in the array.
+int TestPartResultArray::size() const {
+ return static_cast<int>(array_.size());
+}
+
+namespace internal {
+
+HasNewFatalFailureHelper::HasNewFatalFailureHelper()
+ : has_new_fatal_failure_(false),
+ original_reporter_(GetUnitTestImpl()->
+ GetTestPartResultReporterForCurrentThread()) {
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this);
+}
+
+HasNewFatalFailureHelper::~HasNewFatalFailureHelper() {
+ GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(
+ original_reporter_);
+}
+
+void HasNewFatalFailureHelper::ReportTestPartResult(
+ const TestPartResult& result) {
+ if (result.fatally_failed())
+ has_new_fatal_failure_ = true;
+ original_reporter_->ReportTestPartResult(result);
+}
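+
+// Illustrative usage sketch (this helper underlies the
+// ASSERT_NO_FATAL_FAILURE/EXPECT_NO_FATAL_FAILURE macros):
+//
+//   {
+//     HasNewFatalFailureHelper helper;  // starts intercepting results
+//     SomeSubroutineWithAssertions();
+//     if (helper.has_new_fatal_failure()) { /* subroutine failed */ }
+//   }  // destructor restores the original reporter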
+
+} // namespace internal
+
+} // namespace testing
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest-typed-test.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-typed-test.cc
new file mode 100644
index 000000000..df1eef475
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest-typed-test.cc
@@ -0,0 +1,118 @@
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#include "gtest/gtest-typed-test.h"
+#include "gtest/gtest.h"
+
+namespace testing {
+namespace internal {
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// Skips to the first non-space char in str.  Returns a pointer to the
+// terminating '\0' if str contains only whitespace characters.
+static const char* SkipSpaces(const char* str) {
+ while (IsSpace(*str))
+ str++;
+ return str;
+}
+
+static std::vector<std::string> SplitIntoTestNames(const char* src) {
+ std::vector<std::string> name_vec;
+ src = SkipSpaces(src);
+ for (; src != NULL; src = SkipComma(src)) {
+ name_vec.push_back(StripTrailingSpaces(GetPrefixUntilComma(src)));
+ }
+ return name_vec;
+}
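+
+// Example (illustrative): SplitIntoTestNames(" Foo, Bar ,Baz") returns
+// {"Foo", "Bar", "Baz"}; leading whitespace is skipped and trailing
+// whitespace is stripped from each name.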
+
+// Verifies that registered_tests match the test names in
+// registered_tests_; returns registered_tests if successful, or
+// aborts the program otherwise.
+const char* TypedTestCasePState::VerifyRegisteredTestNames(
+ const char* file, int line, const char* registered_tests) {
+ typedef RegisteredTestsMap::const_iterator RegisteredTestIter;
+ registered_ = true;
+
+ std::vector<std::string> name_vec = SplitIntoTestNames(registered_tests);
+
+ Message errors;
+
+ std::set<std::string> tests;
+ for (std::vector<std::string>::const_iterator name_it = name_vec.begin();
+ name_it != name_vec.end(); ++name_it) {
+ const std::string& name = *name_it;
+ if (tests.count(name) != 0) {
+ errors << "Test " << name << " is listed more than once.\n";
+ continue;
+ }
+
+ bool found = false;
+ for (RegisteredTestIter it = registered_tests_.begin();
+ it != registered_tests_.end();
+ ++it) {
+ if (name == it->first) {
+ found = true;
+ break;
+ }
+ }
+
+ if (found) {
+ tests.insert(name);
+ } else {
+ errors << "No test named " << name
+ << " can be found in this test case.\n";
+ }
+ }
+
+ for (RegisteredTestIter it = registered_tests_.begin();
+ it != registered_tests_.end();
+ ++it) {
+ if (tests.count(it->first) == 0) {
+ errors << "You forgot to list test " << it->first << ".\n";
+ }
+ }
+
+ const std::string& errors_str = errors.GetString();
+ if (errors_str != "") {
+ fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+ errors_str.c_str());
+ fflush(stderr);
+ posix::Abort();
+ }
+
+ return registered_tests;
+}
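+
+// Illustrative sketch (hypothetical fixture and test names; typical use
+// of the typed-test-p macros this verification backs):
+//
+//   template <typename T> class QueueTest : public testing::Test {};
+//   TYPED_TEST_CASE_P(QueueTest);
+//   TYPED_TEST_P(QueueTest, IsEmptyInitially) { /* ... */ }
+//   TYPED_TEST_P(QueueTest, DequeueWorks) { /* ... */ }
+//   REGISTER_TYPED_TEST_CASE_P(QueueTest, IsEmptyInitially, DequeueWorks);
+//
+// REGISTER_TYPED_TEST_CASE_P passes its name list here as one
+// comma-separated string; a duplicated or missing name aborts with the
+// diagnostics built above.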
+
+#endif // GTEST_HAS_TYPED_TEST_P
+
+} // namespace internal
+} // namespace testing
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest.cc
new file mode 100644
index 000000000..5a8932c73
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest.cc
@@ -0,0 +1,5389 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+
+#include "gtest/gtest.h"
+#include "gtest/internal/custom/gtest.h"
+#include "gtest/gtest-spi.h"
+
+#include <ctype.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <iomanip>
+#include <limits>
+#include <list>
+#include <map>
+#include <ostream> // NOLINT
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
+
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+# include <fcntl.h> // NOLINT
+# include <limits.h> // NOLINT
+# include <sched.h> // NOLINT
+// Declares vsnprintf(). This header is not available on Windows.
+# include <strings.h> // NOLINT
+# include <sys/mman.h> // NOLINT
+# include <sys/time.h> // NOLINT
+# include <unistd.h> // NOLINT
+# include <string>
+
+#elif GTEST_OS_SYMBIAN
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h> // NOLINT
+
+#elif GTEST_OS_ZOS
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h> // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+# include <strings.h> // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE.
+
+# include <windows.h> // NOLINT
+# undef min
+
+#elif GTEST_OS_WINDOWS // We are on Windows proper.
+
+# include <io.h> // NOLINT
+# include <sys/timeb.h> // NOLINT
+# include <sys/types.h> // NOLINT
+# include <sys/stat.h> // NOLINT
+
+# if GTEST_OS_WINDOWS_MINGW
+// MinGW has gettimeofday() but not _ftime64().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+// TODO(kenton@google.com): There are other ways to get the time on
+// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW
+// supports these; consider using them instead.
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h> // NOLINT
+# endif // GTEST_OS_WINDOWS_MINGW
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <windows.h> // NOLINT
+# undef min
+
+#else
+
+// Assume other platforms have gettimeofday().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <sys/time.h> // NOLINT
+# include <unistd.h> // NOLINT
+
+#endif // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h> // NOLINT
+# include <netdb.h> // NOLINT
+# include <sys/socket.h> // NOLINT
+# include <sys/types.h> // NOLINT
+#endif
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+#if GTEST_OS_WINDOWS
+# define vsnprintf _vsnprintf
+#endif // GTEST_OS_WINDOWS
+
+namespace testing {
+
+using internal::CountIf;
+using internal::ForEach;
+using internal::GetElementOr;
+using internal::Shuffle;
+
+// Constants.
+
+// A test whose test case name or test name matches this filter is
+// disabled and not run.
+static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*";
+
+// A test case whose name matches this filter is considered a death
+// test case and will be run before test cases whose name doesn't
+// match this filter.
+static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*";
+
+// A test filter that matches everything.
+static const char kUniversalFilter[] = "*";
+
+// The default output file for XML output.
+static const char kDefaultOutputFile[] = "test_detail.xml";
+
+// The environment variable name for the test shard index.
+static const char kTestShardIndex[] = "GTEST_SHARD_INDEX";
+// The environment variable name for the total number of test shards.
+static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS";
+// The environment variable name for the test shard status file.
+static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE";
+
+namespace internal {
+
+// The text used in failure messages to indicate the start of the
+// stack trace.
+const char kStackTraceMarker[] = "\nStack trace:\n";
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+bool g_help_flag = false;
+
+} // namespace internal
+
+static const char* GetDefaultFilter() {
+#ifdef GTEST_TEST_FILTER_ENV_VAR_
+ const char* const testbridge_test_only = getenv(GTEST_TEST_FILTER_ENV_VAR_);
+ if (testbridge_test_only != NULL) {
+ return testbridge_test_only;
+ }
+#endif // GTEST_TEST_FILTER_ENV_VAR_
+ return kUniversalFilter;
+}
+
+GTEST_DEFINE_bool_(
+ also_run_disabled_tests,
+ internal::BoolFromGTestEnv("also_run_disabled_tests", false),
+ "Run disabled tests too, in addition to the tests normally being run.");
+
+GTEST_DEFINE_bool_(
+ break_on_failure,
+ internal::BoolFromGTestEnv("break_on_failure", false),
+ "True iff a failed assertion should be a debugger break-point.");
+
+GTEST_DEFINE_bool_(
+ catch_exceptions,
+ internal::BoolFromGTestEnv("catch_exceptions", true),
+ "True iff " GTEST_NAME_
+ " should catch exceptions and treat them as test failures.");
+
+GTEST_DEFINE_string_(
+ color,
+ internal::StringFromGTestEnv("color", "auto"),
+ "Whether to use colors in the output. Valid values: yes, no, "
+ "and auto. 'auto' means to use colors if the output is "
+ "being sent to a terminal and the TERM environment variable "
+ "is set to a terminal type that supports colors.");
+
+GTEST_DEFINE_string_(
+ filter,
+ internal::StringFromGTestEnv("filter", GetDefaultFilter()),
+ "A colon-separated list of glob (not regex) patterns "
+ "for filtering the tests to run, optionally followed by a "
+ "'-' and a : separated list of negative patterns (tests to "
+ "exclude). A test is run if it matches one of the positive "
+ "patterns and does not match any of the negative patterns.");
+
+GTEST_DEFINE_bool_(list_tests, false,
+ "List all tests without running them.");
+
+GTEST_DEFINE_string_(
+ output,
+ internal::StringFromGTestEnv("output", ""),
+ "A format (currently must be \"xml\"), optionally followed "
+ "by a colon and an output file name or directory. A directory "
+ "is indicated by a trailing pathname separator. "
+ "Examples: \"xml:filename.xml\", \"xml::directoryname/\". "
+ "If a directory is specified, output files will be created "
+ "within that directory, with file-names based on the test "
+ "executable's name and, if necessary, made unique by adding "
+ "digits.");
+
+GTEST_DEFINE_bool_(
+ print_time,
+ internal::BoolFromGTestEnv("print_time", true),
+ "True iff " GTEST_NAME_
+ " should display elapsed time in text output.");
+
+GTEST_DEFINE_int32_(
+ random_seed,
+ internal::Int32FromGTestEnv("random_seed", 0),
+ "Random number seed to use when shuffling test orders. Must be in range "
+ "[1, 99999], or 0 to use a seed based on the current time.");
+
+GTEST_DEFINE_int32_(
+ repeat,
+ internal::Int32FromGTestEnv("repeat", 1),
+ "How many times to repeat each test. Specify a negative number "
+ "for repeating forever. Useful for shaking out flaky tests.");
+
+GTEST_DEFINE_bool_(
+ show_internal_stack_frames, false,
+ "True iff " GTEST_NAME_ " should include internal stack frames when "
+ "printing test failure stack traces.");
+
+GTEST_DEFINE_bool_(
+ shuffle,
+ internal::BoolFromGTestEnv("shuffle", false),
+ "True iff " GTEST_NAME_
+ " should randomize tests' order on every run.");
+
+GTEST_DEFINE_int32_(
+ stack_trace_depth,
+ internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth),
+ "The maximum number of stack frames to print when an "
+ "assertion fails. The valid range is 0 through 100, inclusive.");
+
+GTEST_DEFINE_string_(
+ stream_result_to,
+ internal::StringFromGTestEnv("stream_result_to", ""),
+ "This flag specifies the host name and the port number on which to stream "
+ "test results. Example: \"localhost:555\". The flag is effective only on "
+ "Linux.");
+
+GTEST_DEFINE_bool_(
+ throw_on_failure,
+ internal::BoolFromGTestEnv("throw_on_failure", false),
+ "When this flag is specified, a failed assertion will throw an exception "
+ "if exceptions are enabled or exit the program with a non-zero code "
+ "otherwise.");
+
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+GTEST_DEFINE_string_(
+ flagfile,
+ internal::StringFromGTestEnv("flagfile", ""),
+ "This flag specifies the flagfile to read command-line flags from.");
+#endif // GTEST_USE_OWN_FLAGFILE_FLAG_
+
+namespace internal {
+
+// Generates a random number from [0, range), using a Linear
+// Congruential Generator (LCG). Crashes if 'range' is 0 or greater
+// than kMaxRange.
+GTEST_ATTRIBUTE_NO_SANITIZE_UNSIGNED_OVERFLOW_
+UInt32 Random::Generate(UInt32 range) {
+ // These constants are the same as are used in glibc's rand(3).
+ state_ = (1103515245U*state_ + 12345U) % kMaxRange;
+
+ GTEST_CHECK_(range > 0)
+ << "Cannot generate a number in the range [0, 0).";
+ GTEST_CHECK_(range <= kMaxRange)
+ << "Generation of a number in [0, " << range << ") was requested, "
+ << "but this can only generate numbers in [0, " << kMaxRange << ").";
+
+ // Converting via modulus introduces a bit of downward bias, but
+ // it's simple, and a linear congruential generator isn't too good
+ // to begin with.
+ return state_ % range;
+}
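+
+// Worked example (illustrative): starting from state_ == 0, Generate(6)
+// advances state_ to (1103515245 * 0 + 12345) % kMaxRange == 12345 and
+// returns 12345 % 6 == 3. The final modulus is the source of the slight
+// downward bias noted above.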
+
+// GTestIsInitialized() returns true iff the user has initialized
+// Google Test. Useful for catching the user mistake of not initializing
+// Google Test before calling RUN_ALL_TESTS().
+static bool GTestIsInitialized() { return GetArgvs().size() > 0; }
+
+// Iterates over a vector of TestCases, keeping a running sum of the
+// results of calling a given int-returning method on each.
+// Returns the sum.
+static int SumOverTestCaseList(const std::vector<TestCase*>& case_list,
+ int (TestCase::*method)() const) {
+ int sum = 0;
+ for (size_t i = 0; i < case_list.size(); i++) {
+ sum += (case_list[i]->*method)();
+ }
+ return sum;
+}
+
+// Returns true iff the test case passed.
+static bool TestCasePassed(const TestCase* test_case) {
+ return test_case->should_run() && test_case->Passed();
+}
+
+// Returns true iff the test case failed.
+static bool TestCaseFailed(const TestCase* test_case) {
+ return test_case->should_run() && test_case->Failed();
+}
+
+// Returns true iff test_case contains at least one test that should
+// run.
+static bool ShouldRunTestCase(const TestCase* test_case) {
+ return test_case->should_run();
+}
+
+// AssertHelper constructor.
+AssertHelper::AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message)
+ : data_(new AssertHelperData(type, file, line, message)) {
+}
+
+AssertHelper::~AssertHelper() {
+ delete data_;
+}
+
+// Message assignment, for assertion streaming support.
+void AssertHelper::operator=(const Message& message) const {
+ UnitTest::GetInstance()->
+ AddTestPartResult(data_->type, data_->file, data_->line,
+ AppendUserMessage(data_->message, message),
+ UnitTest::GetInstance()->impl()
+ ->CurrentOsStackTraceExceptTop(1)
+ // Skips the stack frame for this function itself.
+ ); // NOLINT
+}
+
+// Mutex for linked pointers.
+GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// A copy of all command line arguments. Set by InitGoogleTest().
+::std::vector<testing::internal::string> g_argvs;
+
+const ::std::vector<testing::internal::string>& GetArgvs() {
+#if defined(GTEST_CUSTOM_GET_ARGVS_)
+ return GTEST_CUSTOM_GET_ARGVS_();
+#else // defined(GTEST_CUSTOM_GET_ARGVS_)
+ return g_argvs;
+#endif // defined(GTEST_CUSTOM_GET_ARGVS_)
+}
+
+// Returns the current application's name, removing directory path if that
+// is present.
+FilePath GetCurrentExecutableName() {
+ FilePath result;
+
+#if GTEST_OS_WINDOWS
+ result.Set(FilePath(GetArgvs()[0]).RemoveExtension("exe"));
+#else
+ result.Set(FilePath(GetArgvs()[0]));
+#endif // GTEST_OS_WINDOWS
+
+ return result.RemoveDirectoryName();
+}
+
+// Functions for processing the gtest_output flag.
+
+// Returns the output format, or "" for normal printed output.
+std::string UnitTestOptions::GetOutputFormat() {
+ const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+ if (gtest_output_flag == NULL) return std::string("");
+
+ const char* const colon = strchr(gtest_output_flag, ':');
+ return (colon == NULL) ?
+ std::string(gtest_output_flag) :
+ std::string(gtest_output_flag, colon - gtest_output_flag);
+}
+
+// Returns the name of the requested output file, or the default if none
+// was explicitly specified.
+std::string UnitTestOptions::GetAbsolutePathToOutputFile() {
+ const char* const gtest_output_flag = GTEST_FLAG(output).c_str();
+ if (gtest_output_flag == NULL)
+ return "";
+
+ const char* const colon = strchr(gtest_output_flag, ':');
+ if (colon == NULL)
+ return internal::FilePath::ConcatPaths(
+ internal::FilePath(
+ UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(kDefaultOutputFile)).string();
+
+ internal::FilePath output_name(colon + 1);
+ if (!output_name.IsAbsolutePath())
+ // TODO(wan@google.com): on Windows \some\path is not an absolute
+ // path (as its meaning depends on the current drive), yet the
+ // following logic for turning it into an absolute path is wrong.
+ // Fix it.
+ output_name = internal::FilePath::ConcatPaths(
+ internal::FilePath(UnitTest::GetInstance()->original_working_dir()),
+ internal::FilePath(colon + 1));
+
+ if (!output_name.IsDirectory())
+ return output_name.string();
+
+ internal::FilePath result(internal::FilePath::GenerateUniqueFileName(
+ output_name, internal::GetCurrentExecutableName(),
+ GetOutputFormat().c_str()));
+ return result.string();
+}
+
+// Returns true iff the wildcard pattern matches the string. The
+// first ':' or '\0' character in pattern marks the end of it.
+//
+// This recursive algorithm isn't very efficient, but is clear and
+// works well enough for matching test names, which are short.
+bool UnitTestOptions::PatternMatchesString(const char *pattern,
+ const char *str) {
+ switch (*pattern) {
+ case '\0':
+ case ':': // Either ':' or '\0' marks the end of the pattern.
+ return *str == '\0';
+ case '?': // Matches any single character.
+ return *str != '\0' && PatternMatchesString(pattern + 1, str + 1);
+ case '*': // Matches any string (possibly empty) of characters.
+ return (*str != '\0' && PatternMatchesString(pattern, str + 1)) ||
+ PatternMatchesString(pattern + 1, str);
+ default: // Non-special character. Matches itself.
+ return *pattern == *str &&
+ PatternMatchesString(pattern + 1, str + 1);
+ }
+}
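+
+// Worked examples (illustrative): PatternMatchesString("*a?", "cat")
+// is true -- '*' consumes "c", 'a' matches 'a', and '?' matches 't'.
+// PatternMatchesString("ab:cd", "ab") is also true, because the ':'
+// terminates the pattern before "cd".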
+
+bool UnitTestOptions::MatchesFilter(
+ const std::string& name, const char* filter) {
+ const char *cur_pattern = filter;
+ for (;;) {
+ if (PatternMatchesString(cur_pattern, name.c_str())) {
+ return true;
+ }
+
+ // Finds the next pattern in the filter.
+ cur_pattern = strchr(cur_pattern, ':');
+
+ // Returns if no more pattern can be found.
+ if (cur_pattern == NULL) {
+ return false;
+ }
+
+    // Skips the pattern separator (the ':' character).
+ cur_pattern++;
+ }
+}
+
+// Returns true iff the user-specified filter matches the test case
+// name and the test name.
+bool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name,
+ const std::string &test_name) {
+ const std::string& full_name = test_case_name + "." + test_name.c_str();
+
+ // Split --gtest_filter at '-', if there is one, to separate into
+ // positive filter and negative filter portions
+ const char* const p = GTEST_FLAG(filter).c_str();
+ const char* const dash = strchr(p, '-');
+ std::string positive;
+ std::string negative;
+ if (dash == NULL) {
+ positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter
+ negative = "";
+ } else {
+ positive = std::string(p, dash); // Everything up to the dash
+ negative = std::string(dash + 1); // Everything after the dash
+ if (positive.empty()) {
+ // Treat '-test1' as the same as '*-test1'
+ positive = kUniversalFilter;
+ }
+ }
+
+ // A filter is a colon-separated list of patterns. It matches a
+ // test if any pattern in it matches the test.
+ return (MatchesFilter(full_name, positive.c_str()) &&
+ !MatchesFilter(full_name, negative.c_str()));
+}
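+
+// Worked example (illustrative): with --gtest_filter=FooTest.*-FooTest.Bar
+// the positive filter is "FooTest.*" and the negative one "FooTest.Bar",
+// so FilterMatchesTest("FooTest", "Baz") returns true while
+// FilterMatchesTest("FooTest", "Bar") returns false.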
+
+#if GTEST_HAS_SEH
+// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+// This function is useful as an __except condition.
+int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) {
+ // Google Test should handle a SEH exception if:
+ // 1. the user wants it to, AND
+ // 2. this is not a breakpoint exception, AND
+ // 3. this is not a C++ exception (VC++ implements them via SEH,
+ // apparently).
+ //
+ // SEH exception code for C++ exceptions.
+ // (see http://support.microsoft.com/kb/185294 for more information).
+ const DWORD kCxxExceptionCode = 0xe06d7363;
+
+ bool should_handle = true;
+
+ if (!GTEST_FLAG(catch_exceptions))
+ should_handle = false;
+ else if (exception_code == EXCEPTION_BREAKPOINT)
+ should_handle = false;
+ else if (exception_code == kCxxExceptionCode)
+ should_handle = false;
+
+ return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH;
+}
+#endif // GTEST_HAS_SEH
+
+} // namespace internal
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test. The 'result' parameter specifies where to report the
+// results. Intercepts only failures from the current thread.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+ TestPartResultArray* result)
+ : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD),
+ result_(result) {
+ Init();
+}
+
+// The c'tor sets this object as the test part result reporter used by
+// Google Test. The 'result' parameter specifies where to report the
+// results.
+ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter(
+ InterceptMode intercept_mode, TestPartResultArray* result)
+ : intercept_mode_(intercept_mode),
+ result_(result) {
+ Init();
+}
+
+void ScopedFakeTestPartResultReporter::Init() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+ old_reporter_ = impl->GetGlobalTestPartResultReporter();
+ impl->SetGlobalTestPartResultReporter(this);
+ } else {
+ old_reporter_ = impl->GetTestPartResultReporterForCurrentThread();
+ impl->SetTestPartResultReporterForCurrentThread(this);
+ }
+}
+
+// The d'tor restores the test part result reporter used by Google Test
+// before.
+ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ if (intercept_mode_ == INTERCEPT_ALL_THREADS) {
+ impl->SetGlobalTestPartResultReporter(old_reporter_);
+ } else {
+ impl->SetTestPartResultReporterForCurrentThread(old_reporter_);
+ }
+}
+
+// Increments the test part result count and remembers the result.
+// This method is from the TestPartResultReporterInterface interface.
+void ScopedFakeTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ result_->Append(result);
+}
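+
+// Illustrative sketch of typical use (inside a test of Google Test
+// itself):
+//
+//   TestPartResultArray results;
+//   {
+//     ScopedFakeTestPartResultReporter reporter(
+//         ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
+//         &results);
+//     ADD_FAILURE() << "captured, not reported";
+//   }  // The original reporter is restored here.
+//   // results.size() is now 1.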
+
+namespace internal {
+
+// Returns the type ID of ::testing::Test. We should always call this
+// instead of GetTypeId< ::testing::Test>() to get the type ID of
+// testing::Test. This is to work around a suspected linker bug when
+// using Google Test as a framework on Mac OS X. The bug causes
+// GetTypeId< ::testing::Test>() to return different values depending
+// on whether the call is from the Google Test framework itself or
+// from user test code. GetTestTypeId() is guaranteed to always
+// return the same value, as it always calls GetTypeId<>() from
+// gtest.cc, which is within the Google Test framework.
+TypeId GetTestTypeId() {
+ return GetTypeId<Test>();
+}
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library. This is solely for testing GetTestTypeId().
+extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId();
+
+// This predicate-formatter checks that 'results' contains a test part
+// failure of the given type and that the failure message contains the
+// given substring.
+AssertionResult HasOneFailure(const char* /* results_expr */,
+ const char* /* type_expr */,
+ const char* /* substr_expr */,
+ const TestPartResultArray& results,
+ TestPartResult::Type type,
+ const string& substr) {
+ const std::string expected(type == TestPartResult::kFatalFailure ?
+ "1 fatal failure" :
+ "1 non-fatal failure");
+ Message msg;
+ if (results.size() != 1) {
+ msg << "Expected: " << expected << "\n"
+ << " Actual: " << results.size() << " failures";
+ for (int i = 0; i < results.size(); i++) {
+ msg << "\n" << results.GetTestPartResult(i);
+ }
+ return AssertionFailure() << msg;
+ }
+
+ const TestPartResult& r = results.GetTestPartResult(0);
+ if (r.type() != type) {
+ return AssertionFailure() << "Expected: " << expected << "\n"
+ << " Actual:\n"
+ << r;
+ }
+
+ if (strstr(r.message(), substr.c_str()) == NULL) {
+ return AssertionFailure() << "Expected: " << expected << " containing \""
+ << substr << "\"\n"
+ << " Actual:\n"
+ << r;
+ }
+
+ return AssertionSuccess();
+}
+
+// The constructor of SingleFailureChecker remembers where to look up
+// test part results, what type of failure we expect, and what
+// substring the failure message should contain.
+SingleFailureChecker::SingleFailureChecker(
+ const TestPartResultArray* results,
+ TestPartResult::Type type,
+ const string& substr)
+ : results_(results),
+ type_(type),
+ substr_(substr) {}
+
+// The destructor of SingleFailureChecker verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring. If that's not the case, a
+// non-fatal failure will be generated.
+SingleFailureChecker::~SingleFailureChecker() {
+ EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_);
+}
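+
+// Illustrative sketch: this checker is the machinery behind the
+// gtest-spi macros, e.g.
+//
+//   EXPECT_FATAL_FAILURE(ASSERT_EQ(1, 2), "Expected");
+//
+// passes iff the statement yields exactly one fatal failure whose
+// message contains the substring "Expected".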
+
+DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter(
+ UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultGlobalTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ unit_test_->current_test_result()->AddTestPartResult(result);
+ unit_test_->listeners()->repeater()->OnTestPartResult(result);
+}
+
+DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter(
+ UnitTestImpl* unit_test) : unit_test_(unit_test) {}
+
+void DefaultPerThreadTestPartResultReporter::ReportTestPartResult(
+ const TestPartResult& result) {
+ unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result);
+}
+
+// Returns the global test part result reporter.
+TestPartResultReporterInterface*
+UnitTestImpl::GetGlobalTestPartResultReporter() {
+ internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+ return global_test_part_result_repoter_;
+}
+
+// Sets the global test part result reporter.
+void UnitTestImpl::SetGlobalTestPartResultReporter(
+ TestPartResultReporterInterface* reporter) {
+ internal::MutexLock lock(&global_test_part_result_reporter_mutex_);
+ global_test_part_result_repoter_ = reporter;
+}
+
+// Returns the test part result reporter for the current thread.
+TestPartResultReporterInterface*
+UnitTestImpl::GetTestPartResultReporterForCurrentThread() {
+ return per_thread_test_part_result_reporter_.get();
+}
+
+// Sets the test part result reporter for the current thread.
+void UnitTestImpl::SetTestPartResultReporterForCurrentThread(
+ TestPartResultReporterInterface* reporter) {
+ per_thread_test_part_result_reporter_.set(reporter);
+}
+
+// Gets the number of successful test cases.
+int UnitTestImpl::successful_test_case_count() const {
+ return CountIf(test_cases_, TestCasePassed);
+}
+
+// Gets the number of failed test cases.
+int UnitTestImpl::failed_test_case_count() const {
+ return CountIf(test_cases_, TestCaseFailed);
+}
+
+// Gets the number of all test cases.
+int UnitTestImpl::total_test_case_count() const {
+ return static_cast<int>(test_cases_.size());
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTestImpl::test_case_to_run_count() const {
+ return CountIf(test_cases_, ShouldRunTestCase);
+}
+
+// Gets the number of successful tests.
+int UnitTestImpl::successful_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count);
+}
+
+// Gets the number of failed tests.
+int UnitTestImpl::failed_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int UnitTestImpl::reportable_disabled_test_count() const {
+ return SumOverTestCaseList(test_cases_,
+ &TestCase::reportable_disabled_test_count);
+}
+
+// Gets the number of disabled tests.
+int UnitTestImpl::disabled_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int UnitTestImpl::reportable_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count);
+}
+
+// Gets the number of all tests.
+int UnitTestImpl::total_test_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::total_test_count);
+}
+
+// Gets the number of tests that should run.
+int UnitTestImpl::test_to_run_count() const {
+ return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count);
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+// trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) {
+ return os_stack_trace_getter()->CurrentStackTrace(
+ static_cast<int>(GTEST_FLAG(stack_trace_depth)),
+ skip_count + 1
+ // Skips the user-specified number of frames plus this function
+ // itself.
+ ); // NOLINT
+}
+
+// Returns the current time in milliseconds.
+TimeInMillis GetTimeInMillis() {
+#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)
+ // Difference between 1970-01-01 and 1601-01-01 in milliseconds.
+ // http://analogous.blogspot.com/2005/04/epoch.html
+ const TimeInMillis kJavaEpochToWinFileTimeDelta =
+ static_cast<TimeInMillis>(116444736UL) * 100000UL;
+ const DWORD kTenthMicrosInMilliSecond = 10000;
+
+ SYSTEMTIME now_systime;
+ FILETIME now_filetime;
+ ULARGE_INTEGER now_int64;
+ // TODO(kenton@google.com): Shouldn't this just use
+ // GetSystemTimeAsFileTime()?
+ GetSystemTime(&now_systime);
+ if (SystemTimeToFileTime(&now_systime, &now_filetime)) {
+ now_int64.LowPart = now_filetime.dwLowDateTime;
+ now_int64.HighPart = now_filetime.dwHighDateTime;
+ now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) -
+ kJavaEpochToWinFileTimeDelta;
+ return now_int64.QuadPart;
+ }
+ return 0;
+#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_
+ __timeb64 now;
+
+ // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996
+ // (deprecated function) there.
+ // TODO(kenton@google.com): Use GetTickCount()? Or use
+ // SystemTimeToFileTime()
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996)
+ _ftime64(&now);
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+ return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm;
+#elif GTEST_HAS_GETTIMEOFDAY_
+ struct timeval now;
+ gettimeofday(&now, NULL);
+ return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000;
+#else
+# error "Don't know how to get the current time on your system."
+#endif
+}
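+
+// Note on the Windows-mobile branch above: FILETIME counts 100 ns ticks,
+// so dividing by kTenthMicrosInMilliSecond (10000) yields milliseconds
+// since 1601-01-01, and subtracting kJavaEpochToWinFileTimeDelta
+// (116444736 * 100000 == 11644473600000 ms, the 1601-to-1970 offset)
+// rebases the result onto the Unix/Java epoch.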
+
+// Utilities
+
+// class String.
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Creates a UTF-16 wide string from the given ANSI string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the wide string, or NULL if the
+// input is NULL.
+LPCWSTR String::AnsiToUtf16(const char* ansi) {
+ if (!ansi) return NULL;
+ const int length = strlen(ansi);
+ const int unicode_length =
+ MultiByteToWideChar(CP_ACP, 0, ansi, length,
+ NULL, 0);
+ WCHAR* unicode = new WCHAR[unicode_length + 1];
+ MultiByteToWideChar(CP_ACP, 0, ansi, length,
+ unicode, unicode_length);
+ unicode[unicode_length] = 0;
+ return unicode;
+}
+
+// Creates an ANSI string from the given wide string, allocating
+// memory using new. The caller is responsible for deleting the return
+// value using delete[]. Returns the ANSI string, or NULL if the
+// input is NULL.
+const char* String::Utf16ToAnsi(LPCWSTR utf16_str) {
+ if (!utf16_str) return NULL;
+ const int ansi_length =
+ WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
+ NULL, 0, NULL, NULL);
+ char* ansi = new char[ansi_length + 1];
+ WideCharToMultiByte(CP_ACP, 0, utf16_str, -1,
+ ansi, ansi_length, NULL, NULL);
+ ansi[ansi_length] = 0;
+ return ansi;
+}
+
+#endif // GTEST_OS_WINDOWS_MOBILE
+
+// Compares two C strings. Returns true iff they have the same content.
+//
+// Unlike strcmp(), this function can handle NULL argument(s). A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CStringEquals(const char * lhs, const char * rhs) {
+ if ( lhs == NULL ) return rhs == NULL;
+
+ if ( rhs == NULL ) return false;
+
+ return strcmp(lhs, rhs) == 0;
+}
+
+#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
+
+// Converts an array of wide chars to a narrow string using the UTF-8
+// encoding, and streams the result to the given Message object.
+static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length,
+ Message* msg) {
+ for (size_t i = 0; i != length; ) { // NOLINT
+ if (wstr[i] != L'\0') {
+ *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i));
+ while (i != length && wstr[i] != L'\0')
+ i++;
+ } else {
+ *msg << '\0';
+ i++;
+ }
+ }
+}
+
+#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING
+
+void SplitString(const ::std::string& str, char delimiter,
+ ::std::vector< ::std::string>* dest) {
+ ::std::vector< ::std::string> parsed;
+ ::std::string::size_type pos = 0;
+ while (::testing::internal::AlwaysTrue()) {
+ const ::std::string::size_type colon = str.find(delimiter, pos);
+ if (colon == ::std::string::npos) {
+ parsed.push_back(str.substr(pos));
+ break;
+ } else {
+ parsed.push_back(str.substr(pos, colon - pos));
+ pos = colon + 1;
+ }
+ }
+ dest->swap(parsed);
+}
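+
+// Worked example (illustrative): SplitString("a:b::c", ':', &dest)
+// leaves dest holding {"a", "b", "", "c"}; empty fields between
+// adjacent delimiters are preserved.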
+
+} // namespace internal
+
+// Constructs an empty Message.
+// We allocate the stringstream separately because otherwise each use of
+// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's
+// stack frame leading to huge stack frames in some cases; gcc does not reuse
+// the stack space.
+Message::Message() : ss_(new ::std::stringstream) {
+ // By default, we want there to be enough precision when printing
+ // a double to a Message.
+ *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2);
+}
+
+// These two overloads allow streaming a wide C string to a Message
+// using the UTF-8 encoding.
+Message& Message::operator <<(const wchar_t* wide_c_str) {
+ return *this << internal::String::ShowWideCString(wide_c_str);
+}
+Message& Message::operator <<(wchar_t* wide_c_str) {
+ return *this << internal::String::ShowWideCString(wide_c_str);
+}
+
+#if GTEST_HAS_STD_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::std::wstring& wstr) {
+ internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+ return *this;
+}
+#endif // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+// Converts the given wide string to a narrow string using the UTF-8
+// encoding, and streams the result to this Message object.
+Message& Message::operator <<(const ::wstring& wstr) {
+ internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this);
+ return *this;
+}
+#endif // GTEST_HAS_GLOBAL_WSTRING
+
+// Gets the text streamed to this object so far as an std::string.
+// Each '\0' character in the buffer is replaced with "\\0".
+std::string Message::GetString() const {
+ return internal::StringStreamToString(ss_.get());
+}
+
+// AssertionResult constructors.
+// Used in EXPECT_TRUE/FALSE(assertion_result).
+AssertionResult::AssertionResult(const AssertionResult& other)
+ : success_(other.success_),
+ message_(other.message_.get() != NULL ?
+ new ::std::string(*other.message_) :
+ static_cast< ::std::string*>(NULL)) {
+}
+
+// Swaps two AssertionResults.
+void AssertionResult::swap(AssertionResult& other) {
+ using std::swap;
+ swap(success_, other.success_);
+ swap(message_, other.message_);
+}
+
+// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+AssertionResult AssertionResult::operator!() const {
+ AssertionResult negation(!success_);
+ if (message_.get() != NULL)
+ negation << *message_;
+ return negation;
+}
+
+// Makes a successful assertion result.
+AssertionResult AssertionSuccess() {
+ return AssertionResult(true);
+}
+
+// Makes a failed assertion result.
+AssertionResult AssertionFailure() {
+ return AssertionResult(false);
+}
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << message.
+AssertionResult AssertionFailure(const Message& message) {
+ return AssertionFailure() << message;
+}
+
+namespace internal {
+
+namespace edit_distance {
+std::vector<EditType> CalculateOptimalEdits(const std::vector<size_t>& left,
+ const std::vector<size_t>& right) {
+ std::vector<std::vector<double> > costs(
+ left.size() + 1, std::vector<double>(right.size() + 1));
+ std::vector<std::vector<EditType> > best_move(
+ left.size() + 1, std::vector<EditType>(right.size() + 1));
+
+ // Populate for empty right.
+ for (size_t l_i = 0; l_i < costs.size(); ++l_i) {
+ costs[l_i][0] = static_cast<double>(l_i);
+ best_move[l_i][0] = kRemove;
+ }
+ // Populate for empty left.
+ for (size_t r_i = 1; r_i < costs[0].size(); ++r_i) {
+ costs[0][r_i] = static_cast<double>(r_i);
+ best_move[0][r_i] = kAdd;
+ }
+
+ for (size_t l_i = 0; l_i < left.size(); ++l_i) {
+ for (size_t r_i = 0; r_i < right.size(); ++r_i) {
+ if (left[l_i] == right[r_i]) {
+ // Found a match. Consume it.
+ costs[l_i + 1][r_i + 1] = costs[l_i][r_i];
+ best_move[l_i + 1][r_i + 1] = kMatch;
+ continue;
+ }
+
+ const double add = costs[l_i + 1][r_i];
+ const double remove = costs[l_i][r_i + 1];
+ const double replace = costs[l_i][r_i];
+ if (add < remove && add < replace) {
+ costs[l_i + 1][r_i + 1] = add + 1;
+ best_move[l_i + 1][r_i + 1] = kAdd;
+ } else if (remove < add && remove < replace) {
+ costs[l_i + 1][r_i + 1] = remove + 1;
+ best_move[l_i + 1][r_i + 1] = kRemove;
+ } else {
+        // We make replace a little more expensive than add/remove to lower
+        // its priority.
+ costs[l_i + 1][r_i + 1] = replace + 1.00001;
+ best_move[l_i + 1][r_i + 1] = kReplace;
+ }
+ }
+ }
+
+ // Reconstruct the best path. We do it in reverse order.
+ std::vector<EditType> best_path;
+ for (size_t l_i = left.size(), r_i = right.size(); l_i > 0 || r_i > 0;) {
+ EditType move = best_move[l_i][r_i];
+ best_path.push_back(move);
+ l_i -= move != kAdd;
+ r_i -= move != kRemove;
+ }
+ std::reverse(best_path.begin(), best_path.end());
+ return best_path;
+}
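+
+// Worked example (illustrative): for left = {1, 2, 3} and right = {1, 3}
+// the cheapest path is {kMatch, kRemove, kMatch} -- a single removal, so
+// the total cost is 1.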
+
+namespace {
+
+// Helper class to convert strings into ids with deduplication.
+class InternalStrings {
+ public:
+ size_t GetId(const std::string& str) {
+ IdMap::iterator it = ids_.find(str);
+ if (it != ids_.end()) return it->second;
+ size_t id = ids_.size();
+ return ids_[str] = id;
+ }
+
+ private:
+ typedef std::map<std::string, size_t> IdMap;
+ IdMap ids_;
+};
+
+} // namespace
+
+std::vector<EditType> CalculateOptimalEdits(
+ const std::vector<std::string>& left,
+ const std::vector<std::string>& right) {
+ std::vector<size_t> left_ids, right_ids;
+ {
+ InternalStrings intern_table;
+ for (size_t i = 0; i < left.size(); ++i) {
+ left_ids.push_back(intern_table.GetId(left[i]));
+ }
+ for (size_t i = 0; i < right.size(); ++i) {
+ right_ids.push_back(intern_table.GetId(right[i]));
+ }
+ }
+ return CalculateOptimalEdits(left_ids, right_ids);
+}
+
+namespace {
+
+// Helper class that holds the state for one hunk and prints it out to the
+// stream.
+// It reorders adds/removes when possible to group all removes before all
+// adds. It also adds the hunk header before printing into the stream.
+class Hunk {
+ public:
+ Hunk(size_t left_start, size_t right_start)
+ : left_start_(left_start),
+ right_start_(right_start),
+ adds_(),
+ removes_(),
+ common_() {}
+
+ void PushLine(char edit, const char* line) {
+ switch (edit) {
+ case ' ':
+ ++common_;
+ FlushEdits();
+ hunk_.push_back(std::make_pair(' ', line));
+ break;
+ case '-':
+ ++removes_;
+ hunk_removes_.push_back(std::make_pair('-', line));
+ break;
+ case '+':
+ ++adds_;
+ hunk_adds_.push_back(std::make_pair('+', line));
+ break;
+ }
+ }
+
+ void PrintTo(std::ostream* os) {
+ PrintHeader(os);
+ FlushEdits();
+ for (std::list<std::pair<char, const char*> >::const_iterator it =
+ hunk_.begin();
+ it != hunk_.end(); ++it) {
+ *os << it->first << it->second << "\n";
+ }
+ }
+
+ bool has_edits() const { return adds_ || removes_; }
+
+ private:
+ void FlushEdits() {
+ hunk_.splice(hunk_.end(), hunk_removes_);
+ hunk_.splice(hunk_.end(), hunk_adds_);
+ }
+
+ // Print a unified diff header for one hunk.
+ // The format is
+ // "@@ -<left_start>,<left_length> +<right_start>,<right_length> @@"
+  // where the left/right parts are omitted if unnecessary.
+ void PrintHeader(std::ostream* ss) const {
+ *ss << "@@ ";
+ if (removes_) {
+ *ss << "-" << left_start_ << "," << (removes_ + common_);
+ }
+ if (removes_ && adds_) {
+ *ss << " ";
+ }
+ if (adds_) {
+ *ss << "+" << right_start_ << "," << (adds_ + common_);
+ }
+ *ss << " @@\n";
+ }
+
+ size_t left_start_, right_start_;
+ size_t adds_, removes_, common_;
+ std::list<std::pair<char, const char*> > hunk_, hunk_adds_, hunk_removes_;
+};
+
+} // namespace
+
+// Create a list of diff hunks in Unified diff format.
+// Each hunk has a header generated by PrintHeader above plus a body with
+// lines prefixed with ' ' for no change, '-' for deletion and '+' for
+// addition.
+// 'context' represents the desired unchanged prefix/suffix around the diff.
+// If two hunks are close enough that their contexts overlap, then they are
+// joined into one hunk.
+std::string CreateUnifiedDiff(const std::vector<std::string>& left,
+ const std::vector<std::string>& right,
+ size_t context) {
+ const std::vector<EditType> edits = CalculateOptimalEdits(left, right);
+
+ size_t l_i = 0, r_i = 0, edit_i = 0;
+ std::stringstream ss;
+ while (edit_i < edits.size()) {
+ // Find first edit.
+ while (edit_i < edits.size() && edits[edit_i] == kMatch) {
+ ++l_i;
+ ++r_i;
+ ++edit_i;
+ }
+
+ // Find the first line to include in the hunk.
+ const size_t prefix_context = std::min(l_i, context);
+ Hunk hunk(l_i - prefix_context + 1, r_i - prefix_context + 1);
+ for (size_t i = prefix_context; i > 0; --i) {
+ hunk.PushLine(' ', left[l_i - i].c_str());
+ }
+
+    // Iterate over the edits until we have found enough suffix for the
+    // hunk or the input is exhausted.
+ size_t n_suffix = 0;
+ for (; edit_i < edits.size(); ++edit_i) {
+ if (n_suffix >= context) {
+ // Continue only if the next hunk is very close.
+ std::vector<EditType>::const_iterator it = edits.begin() + edit_i;
+ while (it != edits.end() && *it == kMatch) ++it;
+ if (it == edits.end() || (it - edits.begin()) - edit_i >= context) {
+ // There is no next edit or it is too far away.
+ break;
+ }
+ }
+
+ EditType edit = edits[edit_i];
+ // Reset count when a non match is found.
+ n_suffix = edit == kMatch ? n_suffix + 1 : 0;
+
+ if (edit == kMatch || edit == kRemove || edit == kReplace) {
+ hunk.PushLine(edit == kMatch ? ' ' : '-', left[l_i].c_str());
+ }
+ if (edit == kAdd || edit == kReplace) {
+ hunk.PushLine('+', right[r_i].c_str());
+ }
+
+ // Advance indices, depending on edit type.
+ l_i += edit != kAdd;
+ r_i += edit != kRemove;
+ }
+
+ if (!hunk.has_edits()) {
+ // We are done. We don't want this hunk.
+ break;
+ }
+
+ hunk.PrintTo(&ss);
+ }
+ return ss.str();
+}
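+
+// Worked example (illustrative): diffing {"a", "b", "c"} against
+// {"a", "x", "c"} with context == 1 yields a single hunk:
+//
+//   @@ -1,3 +1,3 @@
+//    a
+//   -b
+//   +x
+//    c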
+
+} // namespace edit_distance
+
+namespace {
+
+// The string representations of the values received in EqFailure() are already
+// escaped. Split them on escaped '\n' boundaries. Leave all other escaped
+// characters the same.
+std::vector<std::string> SplitEscapedString(const std::string& str) {
+ std::vector<std::string> lines;
+ size_t start = 0, end = str.size();
+ if (end > 2 && str[0] == '"' && str[end - 1] == '"') {
+ ++start;
+ --end;
+ }
+ bool escaped = false;
+ for (size_t i = start; i + 1 < end; ++i) {
+ if (escaped) {
+ escaped = false;
+ if (str[i] == 'n') {
+ lines.push_back(str.substr(start, i - start - 1));
+ start = i + 1;
+ }
+ } else {
+ escaped = str[i] == '\\';
+ }
+ }
+ lines.push_back(str.substr(start, end - start));
+ return lines;
+}
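+
+// Worked example (illustrative): SplitEscapedString("\"line1\\nline2\"")
+// strips the surrounding quotes, splits on the escaped newline, and
+// returns {"line1", "line2"}.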
+
+} // namespace
+
+// Constructs and returns the message for an equality assertion
+// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
+//
+// The first four parameters are the expressions used in the assertion
+// and their values, as strings. For example, for ASSERT_EQ(foo, bar)
+// where foo is 5 and bar is 6, we have:
+//
+// lhs_expression: "foo"
+// rhs_expression: "bar"
+// lhs_value: "5"
+// rhs_value: "6"
+//
+// The ignoring_case parameter is true iff the assertion is a
+// *_STRCASEEQ*. When it's true, the string "Ignoring case" will
+// be inserted into the message.
+AssertionResult EqFailure(const char* lhs_expression,
+ const char* rhs_expression,
+ const std::string& lhs_value,
+ const std::string& rhs_value,
+ bool ignoring_case) {
+ Message msg;
+ msg << " Expected: " << lhs_expression;
+ if (lhs_value != lhs_expression) {
+ msg << "\n Which is: " << lhs_value;
+ }
+ msg << "\nTo be equal to: " << rhs_expression;
+ if (rhs_value != rhs_expression) {
+ msg << "\n Which is: " << rhs_value;
+ }
+
+ if (ignoring_case) {
+ msg << "\nIgnoring case";
+ }
+
+ if (!lhs_value.empty() && !rhs_value.empty()) {
+ const std::vector<std::string> lhs_lines =
+ SplitEscapedString(lhs_value);
+ const std::vector<std::string> rhs_lines =
+ SplitEscapedString(rhs_value);
+ if (lhs_lines.size() > 1 || rhs_lines.size() > 1) {
+ msg << "\nWith diff:\n"
+ << edit_distance::CreateUnifiedDiff(lhs_lines, rhs_lines);
+ }
+ }
+
+ return AssertionFailure() << msg;
+}
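+
+// For the foo/bar example above, the resulting message reads
+// (illustrative; whitespace approximate):
+//
+//         Expected: foo
+//         Which is: 5
+//   To be equal to: bar
+//         Which is: 6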
+
+// Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
+std::string GetBoolAssertionFailureMessage(
+ const AssertionResult& assertion_result,
+ const char* expression_text,
+ const char* actual_predicate_value,
+ const char* expected_predicate_value) {
+ const char* actual_message = assertion_result.message();
+ Message msg;
+ msg << "Value of: " << expression_text
+ << "\n Actual: " << actual_predicate_value;
+ if (actual_message[0] != '\0')
+ msg << " (" << actual_message << ")";
+ msg << "\nExpected: " << expected_predicate_value;
+ return msg.GetString();
+}
+
+// Helper function for implementing ASSERT_NEAR.
+AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error) {
+ const double diff = fabs(val1 - val2);
+ if (diff <= abs_error) return AssertionSuccess();
+
+ // TODO(wan): do not print the value of an expression if it's
+ // already a literal.
+ return AssertionFailure()
+ << "The difference between " << expr1 << " and " << expr2
+ << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
+ << expr1 << " evaluates to " << val1 << ",\n"
+ << expr2 << " evaluates to " << val2 << ", and\n"
+ << abs_error_expr << " evaluates to " << abs_error << ".";
+}
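+
+// Illustrative sketch: this predicate-formatter backs ASSERT_NEAR, e.g.
+//
+//   ASSERT_NEAR(2.00001, 2.0, 1e-4);  // passes: diff is ~1e-5 <= 1e-4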
+
+
+// Helper template for implementing FloatLE() and DoubleLE().
+template <typename RawType>
+AssertionResult FloatingPointLE(const char* expr1,
+ const char* expr2,
+ RawType val1,
+ RawType val2) {
+ // Returns success if val1 is less than val2,
+ if (val1 < val2) {
+ return AssertionSuccess();
+ }
+
+ // or if val1 is almost equal to val2.
+ const FloatingPoint<RawType> lhs(val1), rhs(val2);
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ // Note that the above two checks will both fail if either val1 or
+ // val2 is NaN, as the IEEE floating-point standard requires that
+ // any predicate involving a NaN must return false.
+
+ ::std::stringstream val1_ss;
+ val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << val1;
+
+ ::std::stringstream val2_ss;
+ val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << val2;
+
+ return AssertionFailure()
+ << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
+ << " Actual: " << StringStreamToString(&val1_ss) << " vs "
+ << StringStreamToString(&val2_ss);
+}
+
+} // namespace internal
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+AssertionResult FloatLE(const char* expr1, const char* expr2,
+ float val1, float val2) {
+ return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
+}
+
+// Asserts that val1 is less than, or almost equal to, val2. Fails
+// otherwise. In particular, it fails if either val1 or val2 is NaN.
+AssertionResult DoubleLE(const char* expr1, const char* expr2,
+ double val1, double val2) {
+ return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
+}
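+
+// Illustrative sketch: these are meant for the predicate-format macros,
+// e.g.
+//
+//   EXPECT_PRED_FORMAT2(::testing::FloatLE, 1.0f, 1.0f + 1e-8f);
+//
+// passes: 1e-8f vanishes in float, so the operands compare almost equal
+// even though the first is not strictly less than the second.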
+
+namespace internal {
+
+// The helper function for {ASSERT|EXPECT}_EQ with int or enum
+// arguments.
+AssertionResult CmpHelperEQ(const char* lhs_expression,
+ const char* rhs_expression,
+ BiggestInt lhs,
+ BiggestInt rhs) {
+ if (lhs == rhs) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ FormatForComparisonFailureMessage(lhs, rhs),
+ FormatForComparisonFailureMessage(rhs, lhs),
+ false);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_?? with integer or enum arguments. It is here
+// just to avoid copy-and-paste of similar code.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ BiggestInt val1, BiggestInt val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ return AssertionFailure() \
+ << "Expected: (" << expr1 << ") " #op " (" << expr2\
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+ }\
+}
+
+// Implements the helper function for {ASSERT|EXPECT}_NE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(NE, !=)
+// Implements the helper function for {ASSERT|EXPECT}_LE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LE, <=)
+// Implements the helper function for {ASSERT|EXPECT}_LT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LT, < )
+// Implements the helper function for {ASSERT|EXPECT}_GE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GE, >=)
+// Implements the helper function for {ASSERT|EXPECT}_GT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GT, > )
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+AssertionResult CmpHelperSTREQ(const char* lhs_expression,
+ const char* rhs_expression,
+ const char* lhs,
+ const char* rhs) {
+ if (String::CStringEquals(lhs, rhs)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ PrintToString(lhs),
+ PrintToString(rhs),
+ false);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+AssertionResult CmpHelperSTRCASEEQ(const char* lhs_expression,
+ const char* rhs_expression,
+ const char* lhs,
+ const char* rhs) {
+ if (String::CaseInsensitiveCStringEquals(lhs, rhs)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ PrintToString(lhs),
+ PrintToString(rhs),
+ true);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2) {
+ if (!String::CStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ } else {
+ return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << "), actual: \""
+ << s1 << "\" vs \"" << s2 << "\"";
+ }
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2) {
+ if (!String::CaseInsensitiveCStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ } else {
+ return AssertionFailure()
+ << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << ") (ignoring case), actual: \""
+ << s1 << "\" vs \"" << s2 << "\"";
+ }
+}
+
+} // namespace internal
+
+namespace {
+
+// Helper functions for implementing IsSubstring() and IsNotSubstring().
+
+// This group of overloaded functions return true iff needle is a
+// substring of haystack. NULL is considered a substring of itself
+// only.
+
+bool IsSubstringPred(const char* needle, const char* haystack) {
+ if (needle == NULL || haystack == NULL)
+ return needle == haystack;
+
+ return strstr(haystack, needle) != NULL;
+}
+
+bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {
+ if (needle == NULL || haystack == NULL)
+ return needle == haystack;
+
+ return wcsstr(haystack, needle) != NULL;
+}
+
+// StringType here can be either ::std::string or ::std::wstring.
+template <typename StringType>
+bool IsSubstringPred(const StringType& needle,
+ const StringType& haystack) {
+ return haystack.find(needle) != StringType::npos;
+}
+
+// This function implements either IsSubstring() or IsNotSubstring(),
+// depending on the value of the expected_to_be_substring parameter.
+// StringType here can be const char*, const wchar_t*, ::std::string,
+// or ::std::wstring.
+template <typename StringType>
+AssertionResult IsSubstringImpl(
+ bool expected_to_be_substring,
+ const char* needle_expr, const char* haystack_expr,
+ const StringType& needle, const StringType& haystack) {
+ if (IsSubstringPred(needle, haystack) == expected_to_be_substring)
+ return AssertionSuccess();
+
+ const bool is_wide_string = sizeof(needle[0]) > 1;
+ const char* const begin_string_quote = is_wide_string ? "L\"" : "\"";
+ return AssertionFailure()
+ << "Value of: " << needle_expr << "\n"
+ << " Actual: " << begin_string_quote << needle << "\"\n"
+ << "Expected: " << (expected_to_be_substring ? "" : "not ")
+ << "a substring of " << haystack_expr << "\n"
+ << "Which is: " << begin_string_quote << haystack << "\"";
+}
+
+} // namespace
+
+// IsSubstring() and IsNotSubstring() check whether needle is a
+// substring of haystack (NULL is considered a substring of itself
+// only), and return an appropriate error message when they fail.
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+
+#if GTEST_HAS_STD_WSTRING
+AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack) {
+ return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack);
+}
+
+AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack) {
+ return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack);
+}
+#endif // GTEST_HAS_STD_WSTRING
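+
+// Illustrative sketch of typical use with the predicate-format macros:
+//
+//   EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle",
+//                       "haystack with a needle");
+//   EXPECT_PRED_FORMAT2(::testing::IsNotSubstring, "needle", "haystacks");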
+
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+
+namespace {
+
+// Helper function for the IsHRESULT{Success,Failure} predicates.
+AssertionResult HRESULTFailureHelper(const char* expr,
+ const char* expected,
+ long hr) { // NOLINT
+# if GTEST_OS_WINDOWS_MOBILE
+
+ // Windows CE doesn't support FormatMessage.
+ const char error_text[] = "";
+
+# else
+
+  // Looks up the human-readable system message for the HRESULT code.
+  // Since we're not passing any params to FormatMessage, we don't
+  // want inserts expanded.
+ const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS;
+ const DWORD kBufSize = 4096;
+ // Gets the system's human readable message string for this HRESULT.
+ char error_text[kBufSize] = { '\0' };
+ DWORD message_length = ::FormatMessageA(kFlags,
+ 0, // no source, we're asking system
+ hr, // the error
+ 0, // no line width restrictions
+ error_text, // output buffer
+ kBufSize, // buf size
+ NULL); // no arguments for inserts
+  // Trims trailing whitespace (FormatMessage leaves a trailing CR-LF).
+ for (; message_length && IsSpace(error_text[message_length - 1]);
+ --message_length) {
+ error_text[message_length - 1] = '\0';
+ }
+
+# endif // GTEST_OS_WINDOWS_MOBILE
+
+ const std::string error_hex("0x" + String::FormatHexInt(hr));
+ return ::testing::AssertionFailure()
+ << "Expected: " << expr << " " << expected << ".\n"
+ << " Actual: " << error_hex << " " << error_text << "\n";
+}
+
+} // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) { // NOLINT
+ if (SUCCEEDED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT
+ if (FAILED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "fails", hr);
+}
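+
+// These predicates back the EXPECT_HRESULT_SUCCEEDED and
+// EXPECT_HRESULT_FAILED assertions, e.g.:
+//
+//   EXPECT_HRESULT_SUCCEEDED(::CoInitializeEx(NULL, COINIT_MULTITHREADED));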
+
+#endif // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length Encoding
+// 0 - 7 bits 0xxxxxxx
+// 8 - 11 bits 110xxxxx 10xxxxxx
+// 12 - 16 bits 1110xxxx 10xxxxxx 10xxxxxx
+// 17 - 21 bits 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) << 7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern. Returns the n
+// lowest bits. As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+ const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+ *bits >>= n;
+ return low_bits;
+}
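+
+// For example, with *bits == 0x2F (binary 101111) and n == 3,
+// ChopLowBits() returns 0x7 (binary 111) and leaves *bits == 0x5
+// (binary 101).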
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+std::string CodePointToUtf8(UInt32 code_point) {
+ if (code_point > kMaxCodePoint4) {
+ return "(Invalid Unicode 0x" + String::FormatHexInt(code_point) + ")";
+ }
+
+ char str[5]; // Big enough for the largest valid code point.
+ if (code_point <= kMaxCodePoint1) {
+ str[1] = '\0';
+ str[0] = static_cast<char>(code_point); // 0xxxxxxx
+ } else if (code_point <= kMaxCodePoint2) {
+ str[2] = '\0';
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xC0 | code_point); // 110xxxxx
+ } else if (code_point <= kMaxCodePoint3) {
+ str[3] = '\0';
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xE0 | code_point); // 1110xxxx
+ } else { // code_point <= kMaxCodePoint4
+ str[4] = '\0';
+ str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xF0 | code_point); // 11110xxx
+ }
+ return str;
+}
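+
+// For example, U+20AC (the euro sign) needs 14 bits, so it takes the
+// three-byte form from the table above:
+//
+//   CodePointToUtf8(0x20AC) == "\xE2\x82\xAC"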
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding. All supported systems
+// with a 16-bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+ return sizeof(wchar_t) == 2 &&
+ (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from UTF16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+ wchar_t second) {
+ const UInt32 mask = (1 << 10) - 1;
+ return (sizeof(wchar_t) == 2) ?
+ (((first & mask) << 10) | (second & mask)) + 0x10000 :
+ // This function should not be called when the condition is
+ // false, but we provide a sensible default in case it is.
+ static_cast<UInt32>(first);
+}
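+
+// For example, the pair (0xD83D, 0xDE00) encodes U+1F600:
+//   ((0x3D << 10) | 0x200) + 0x10000 == 0x1F600.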
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF-16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+std::string WideStringToUtf8(const wchar_t* str, int num_chars) {
+ if (num_chars == -1)
+ num_chars = static_cast<int>(wcslen(str));
+
+ ::std::stringstream stream;
+ for (int i = 0; i < num_chars; ++i) {
+ UInt32 unicode_code_point;
+
+ if (str[i] == L'\0') {
+ break;
+ } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
+ unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
+ str[i + 1]);
+ i++;
+ } else {
+ unicode_code_point = static_cast<UInt32>(str[i]);
+ }
+
+ stream << CodePointToUtf8(unicode_code_point);
+ }
+ return StringStreamToString(&stream);
+}
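+
+// For example, on a system where wchar_t is UTF-16, the surrogate pair
+// for U+1F600 is converted to its four-byte UTF-8 form:
+//
+//   WideStringToUtf8(L"\xD83D\xDE00", -1) == "\xF0\x9F\x98\x80"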
+
+// Converts a wide C string to an std::string using the UTF-8 encoding.
+// NULL will be converted to "(null)".
+std::string String::ShowWideCString(const wchar_t * wide_c_str) {
+ if (wide_c_str == NULL) return "(null)";
+
+ return internal::WideStringToUtf8(wide_c_str, -1);
+}
+
+// Compares two wide C strings. Returns true iff they have the same
+// content.
+//
+// Unlike wcscmp(), this function can handle NULL argument(s). A NULL
+// C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) {
+ if (lhs == NULL) return rhs == NULL;
+
+ if (rhs == NULL) return false;
+
+ return wcscmp(lhs, rhs) == 0;
+}
+
+// Helper function for *_STREQ on wide strings.
+AssertionResult CmpHelperSTREQ(const char* lhs_expression,
+ const char* rhs_expression,
+ const wchar_t* lhs,
+ const wchar_t* rhs) {
+ if (String::WideCStringEquals(lhs, rhs)) {
+ return AssertionSuccess();
+ }
+
+ return EqFailure(lhs_expression,
+ rhs_expression,
+ PrintToString(lhs),
+ PrintToString(rhs),
+ false);
+}
+
+// Helper function for *_STRNE on wide strings.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2) {
+ if (!String::WideCStringEquals(s1, s2)) {
+ return AssertionSuccess();
+ }
+
+ return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+ << s2_expression << "), actual: "
+ << PrintToString(s1)
+ << " vs " << PrintToString(s2);
+}
+
+// Compares two C strings, ignoring case. Returns true iff they have
+// the same content.
+//
+// Unlike strcasecmp(), this function can handle NULL argument(s). A
+// NULL C string is considered different to any non-NULL C string,
+// including the empty string.
+bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) {
+ if (lhs == NULL)
+ return rhs == NULL;
+ if (rhs == NULL)
+ return false;
+ return posix::StrCaseCmp(lhs, rhs) == 0;
+}
+
+// Compares two wide C strings, ignoring case. Returns true iff they
+// have the same content.
+//
+// Unlike wcscasecmp(), this function can handle NULL argument(s).
+// A NULL C string is considered different to any non-NULL wide C string,
+// including the empty string.
+// NB: The implementations on different platforms slightly differ.
+// On Windows, this method uses _wcsicmp, which compares according to
+// the LC_CTYPE category of the current locale. On GNU platforms it uses
+// wcscasecmp, which also compares according to the LC_CTYPE category of
+// the current locale. On Mac OS X, it uses towlower, which likewise uses
+// the LC_CTYPE category of the current locale.
+bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+ const wchar_t* rhs) {
+ if (lhs == NULL) return rhs == NULL;
+
+ if (rhs == NULL) return false;
+
+#if GTEST_OS_WINDOWS
+ return _wcsicmp(lhs, rhs) == 0;
+#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID
+ return wcscasecmp(lhs, rhs) == 0;
+#else
+ // Android, Mac OS X and Cygwin don't define wcscasecmp.
+ // Other unknown OSes may not define it either.
+ wint_t left, right;
+ do {
+ left = towlower(*lhs++);
+ right = towlower(*rhs++);
+ } while (left && left == right);
+ return left == right;
+#endif // OS selector
+}
+
+// Returns true iff str ends with the given suffix, ignoring case.
+// Any string is considered to end with an empty suffix.
+bool String::EndsWithCaseInsensitive(
+ const std::string& str, const std::string& suffix) {
+ const size_t str_len = str.length();
+ const size_t suffix_len = suffix.length();
+ return (str_len >= suffix_len) &&
+ CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len,
+ suffix.c_str());
+}
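+
+// For example, EndsWithCaseInsensitive("foo_test.CC", ".cc") returns
+// true.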
+
+// Formats an int value as "%02d".
+std::string String::FormatIntWidth2(int value) {
+ std::stringstream ss;
+ ss << std::setfill('0') << std::setw(2) << value;
+ return ss.str();
+}
+
+// Formats an int value as "%X".
+std::string String::FormatHexInt(int value) {
+ std::stringstream ss;
+ ss << std::hex << std::uppercase << value;
+ return ss.str();
+}
+
+// Formats a byte as "%02X".
+std::string String::FormatByte(unsigned char value) {
+ std::stringstream ss;
+ ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase
+ << static_cast<unsigned int>(value);
+ return ss.str();
+}
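+
+// For example:
+//
+//   String::FormatIntWidth2(7) == "07"
+//   String::FormatHexInt(255)  == "FF"
+//   String::FormatByte(0x0A)   == "0A"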
+
+// Converts the buffer in a stringstream to an std::string, converting NUL
+// bytes to "\\0" along the way.
+std::string StringStreamToString(::std::stringstream* ss) {
+ const ::std::string& str = ss->str();
+ const char* const start = str.c_str();
+ const char* const end = start + str.length();
+
+ std::string result;
+ result.reserve(2 * (end - start));
+ for (const char* ch = start; ch != end; ++ch) {
+ if (*ch == '\0') {
+      result += "\\0";  // Replaces NUL with "\\0".
+ } else {
+ result += *ch;
+ }
+ }
+
+ return result;
+}
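+
+// For example, a stream holding the three bytes 'a', '\0', 'b' is
+// converted to the four-character string "a\\0b".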
+
+// Appends the user-supplied message to the Google-Test-generated message.
+std::string AppendUserMessage(const std::string& gtest_msg,
+ const Message& user_msg) {
+ // Appends the user message if it's non-empty.
+ const std::string user_msg_string = user_msg.GetString();
+ if (user_msg_string.empty()) {
+ return gtest_msg;
+ }
+
+ return gtest_msg + "\n" + user_msg_string;
+}
+
+} // namespace internal
+
+// class TestResult
+
+// Creates an empty TestResult.
+TestResult::TestResult()
+ : death_test_count_(0),
+ elapsed_time_(0) {
+}
+
+// D'tor.
+TestResult::~TestResult() {
+}
+
+// Returns the i-th test part result among all the results. i can
+// range from 0 to total_part_count() - 1. If i is not in that range,
+// aborts the program.
+const TestPartResult& TestResult::GetTestPartResult(int i) const {
+ if (i < 0 || i >= total_part_count())
+ internal::posix::Abort();
+ return test_part_results_.at(i);
+}
+
+// Returns the i-th test property. i can range from 0 to
+// test_property_count() - 1. If i is not in that range, aborts the
+// program.
+const TestProperty& TestResult::GetTestProperty(int i) const {
+ if (i < 0 || i >= test_property_count())
+ internal::posix::Abort();
+ return test_properties_.at(i);
+}
+
+// Clears the test part results.
+void TestResult::ClearTestPartResults() {
+ test_part_results_.clear();
+}
+
+// Adds a test part result to the list.
+void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
+ test_part_results_.push_back(test_part_result);
+}
+
+// Adds a test property to the list. If a property with the same key as the
+// supplied property is already represented, the value of this test_property
+// replaces the old value for that key.
+void TestResult::RecordProperty(const std::string& xml_element,
+ const TestProperty& test_property) {
+ if (!ValidateTestProperty(xml_element, test_property)) {
+ return;
+ }
+ internal::MutexLock lock(&test_properites_mutex_);
+ const std::vector<TestProperty>::iterator property_with_matching_key =
+ std::find_if(test_properties_.begin(), test_properties_.end(),
+ internal::TestPropertyKeyIs(test_property.key()));
+ if (property_with_matching_key == test_properties_.end()) {
+ test_properties_.push_back(test_property);
+ return;
+ }
+ property_with_matching_key->SetValue(test_property.value());
+}
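+
+// For example, a test body that records the same key twice ends up with
+// only the latest value in the output:
+//
+//   RecordProperty("MaxIterations", 5);
+//   RecordProperty("MaxIterations", 10);  // The recorded value is now 10.
+//
+// (RecordProperty() here is ::testing::Test::RecordProperty(), which
+// funnels into this method.)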
+
+// The list of reserved attributes used in the <testsuites> element of XML
+// output.
+static const char* const kReservedTestSuitesAttributes[] = {
+ "disabled",
+ "errors",
+ "failures",
+ "name",
+ "random_seed",
+ "tests",
+ "time",
+ "timestamp"
+};
+
+// The list of reserved attributes used in the <testsuite> element of XML
+// output.
+static const char* const kReservedTestSuiteAttributes[] = {
+ "disabled",
+ "errors",
+ "failures",
+ "name",
+ "tests",
+ "time"
+};
+
+// The list of reserved attributes used in the <testcase> element of XML output.
+static const char* const kReservedTestCaseAttributes[] = {
+ "classname",
+ "name",
+ "status",
+ "time",
+ "type_param",
+ "value_param"
+};
+
+template <int kSize>
+std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {
+ return std::vector<std::string>(array, array + kSize);
+}
+
+static std::vector<std::string> GetReservedAttributesForElement(
+ const std::string& xml_element) {
+ if (xml_element == "testsuites") {
+ return ArrayAsVector(kReservedTestSuitesAttributes);
+ } else if (xml_element == "testsuite") {
+ return ArrayAsVector(kReservedTestSuiteAttributes);
+ } else if (xml_element == "testcase") {
+ return ArrayAsVector(kReservedTestCaseAttributes);
+ } else {
+ GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element;
+ }
+  // This code is unreachable, but some compilers may not realize that.
+ return std::vector<std::string>();
+}
+
+static std::string FormatWordList(const std::vector<std::string>& words) {
+  Message word_list;
+  for (size_t i = 0; i < words.size(); ++i) {
+    if (i > 0) {
+      // Uses an Oxford comma for three or more words and a plain space
+      // for exactly two.
+      if (words.size() > 2) {
+        word_list << ",";
+      }
+      word_list << " ";
+    }
+    // "and" only makes sense when there is more than one word.
+    if (i == words.size() - 1 && words.size() > 1) {
+      word_list << "and ";
+    }
+    word_list << "'" << words[i] << "'";
+  }
+  return word_list.GetString();
+}
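+
+// For example:
+//
+//   FormatWordList() of {"name"} returns "'name'".
+//   FormatWordList() of {"name", "time"} returns "'name' and 'time'".
+//   FormatWordList() of {"name", "time", "tests"} returns
+//   "'name', 'time', and 'tests'".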
+
+bool ValidateTestPropertyName(const std::string& property_name,
+ const std::vector<std::string>& reserved_names) {
+ if (std::find(reserved_names.begin(), reserved_names.end(), property_name) !=
+ reserved_names.end()) {
+ ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name
+ << " (" << FormatWordList(reserved_names)
+ << " are reserved by " << GTEST_NAME_ << ")";
+ return false;
+ }
+ return true;
+}
+
+// Adds a failure if the key is a reserved attribute of the element named
+// xml_element. Returns true if the property is valid.
+bool TestResult::ValidateTestProperty(const std::string& xml_element,
+ const TestProperty& test_property) {
+ return ValidateTestPropertyName(test_property.key(),
+ GetReservedAttributesForElement(xml_element));
+}
+
+// Clears the object.
+void TestResult::Clear() {
+ test_part_results_.clear();
+ test_properties_.clear();
+ death_test_count_ = 0;
+ elapsed_time_ = 0;
+}
+
+// Returns true iff the test failed.
+bool TestResult::Failed() const {
+ for (int i = 0; i < total_part_count(); ++i) {
+ if (GetTestPartResult(i).failed())
+ return true;
+ }
+ return false;
+}
+
+// Returns true iff the test part fatally failed.
+static bool TestPartFatallyFailed(const TestPartResult& result) {
+ return result.fatally_failed();
+}
+
+// Returns true iff the test fatally failed.
+bool TestResult::HasFatalFailure() const {
+ return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
+}
+
+// Returns true iff the test part non-fatally failed.
+static bool TestPartNonfatallyFailed(const TestPartResult& result) {
+ return result.nonfatally_failed();
+}
+
+// Returns true iff the test has a non-fatal failure.
+bool TestResult::HasNonfatalFailure() const {
+ return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
+}
+
+// Gets the number of all test parts. This is the sum of the number
+// of successful test parts and the number of failed test parts.
+int TestResult::total_part_count() const {
+ return static_cast<int>(test_part_results_.size());
+}
+
+// Returns the number of the test properties.
+int TestResult::test_property_count() const {
+ return static_cast<int>(test_properties_.size());
+}
+
+// class Test
+
+// Creates a Test object.
+
+// The c'tor saves the states of all flags.
+Test::Test()
+ : gtest_flag_saver_(new GTEST_FLAG_SAVER_) {
+}
+
+// The d'tor restores the states of all flags. The actual work is
+// done by the d'tor of the gtest_flag_saver_ field, and thus not
+// visible here.
+Test::~Test() {
+}
+
+// Sets up the test fixture.
+//
+// A sub-class may override this.
+void Test::SetUp() {
+}
+
+// Tears down the test fixture.
+//
+// A sub-class may override this.
+void Test::TearDown() {
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const std::string& key, const std::string& value) {
+ UnitTest::GetInstance()->RecordProperty(key, value);
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const std::string& key, int value) {
+ Message value_message;
+ value_message << value;
+ RecordProperty(key, value_message.GetString().c_str());
+}
+
+namespace internal {
+
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+ const std::string& message) {
+ // This function is a friend of UnitTest and as such has access to
+ // AddTestPartResult.
+ UnitTest::GetInstance()->AddTestPartResult(
+ result_type,
+ NULL, // No info about the source file where the exception occurred.
+ -1, // We have no info on which line caused the exception.
+ message,
+ ""); // No stack trace, either.
+}
+
+} // namespace internal
+
+// Google Test requires all tests in the same test case to use the same test
+// fixture class. This function checks if the current test has the
+// same fixture class as the first test in the current test case. If
+// yes, it returns true; otherwise it generates a Google Test failure and
+// returns false.
+bool Test::HasSameFixtureClass() {
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ const TestCase* const test_case = impl->current_test_case();
+
+ // Info about the first test in the current test case.
+ const TestInfo* const first_test_info = test_case->test_info_list()[0];
+ const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_;
+ const char* const first_test_name = first_test_info->name();
+
+ // Info about the current test.
+ const TestInfo* const this_test_info = impl->current_test_info();
+ const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_;
+ const char* const this_test_name = this_test_info->name();
+
+ if (this_fixture_id != first_fixture_id) {
+ // Is the first test defined using TEST?
+ const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId();
+ // Is this test defined using TEST?
+ const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId();
+
+ if (first_is_TEST || this_is_TEST) {
+ // Both TEST and TEST_F appear in same test case, which is incorrect.
+ // Tell the user how to fix this.
+
+ // Gets the name of the TEST and the name of the TEST_F. Note
+ // that first_is_TEST and this_is_TEST cannot both be true, as
+ // the fixture IDs are different for the two tests.
+ const char* const TEST_name =
+ first_is_TEST ? first_test_name : this_test_name;
+ const char* const TEST_F_name =
+ first_is_TEST ? this_test_name : first_test_name;
+
+ ADD_FAILURE()
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class, so mixing TEST_F and TEST in the same test case is\n"
+ << "illegal. In test case " << this_test_info->test_case_name()
+ << ",\n"
+ << "test " << TEST_F_name << " is defined using TEST_F but\n"
+ << "test " << TEST_name << " is defined using TEST. You probably\n"
+ << "want to change the TEST to TEST_F or move it to another test\n"
+ << "case.";
+ } else {
+ // Two fixture classes with the same name appear in two different
+ // namespaces, which is not allowed. Tell the user how to fix this.
+ ADD_FAILURE()
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class. However, in test case "
+ << this_test_info->test_case_name() << ",\n"
+ << "you defined test " << first_test_name
+ << " and test " << this_test_name << "\n"
+ << "using two different test fixture classes. This can happen if\n"
+ << "the two classes are from different namespaces or translation\n"
+ << "units and have the same name. You should probably rename one\n"
+ << "of the classes to put the tests into different test cases.";
+ }
+ return false;
+ }
+
+ return true;
+}
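+
+// For example, the following mix triggers the first failure above,
+// because TEST uses the implicit ::testing::Test fixture while TEST_F
+// uses FooTest:
+//
+//   class FooTest : public ::testing::Test {};
+//   TEST_F(FooTest, ReturnsTrue) {}
+//   TEST(FooTest, ReturnsFalse) {}  // Mixing TEST and TEST_F is illegal.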
+
+#if GTEST_HAS_SEH
+
+// Adds an "exception thrown" fatal failure to the current test. This
+// function returns its result via an output parameter pointer because VC++
+// prohibits creation of objects with destructors on stack in functions
+// using __try (see error C2712).
+static std::string* FormatSehExceptionMessage(DWORD exception_code,
+ const char* location) {
+ Message message;
+ message << "SEH exception with code 0x" << std::setbase(16) <<
+ exception_code << std::setbase(10) << " thrown in " << location << ".";
+
+ return new std::string(message.GetString());
+}
+
+#endif // GTEST_HAS_SEH
+
+namespace internal {
+
+#if GTEST_HAS_EXCEPTIONS
+
+// Adds an "exception thrown" fatal failure to the current test.
+static std::string FormatCxxExceptionMessage(const char* description,
+ const char* location) {
+ Message message;
+ if (description != NULL) {
+ message << "C++ exception with description \"" << description << "\"";
+ } else {
+ message << "Unknown C++ exception";
+ }
+ message << " thrown in " << location << ".";
+
+ return message.GetString();
+}
+
+static std::string PrintTestPartResultToString(
+ const TestPartResult& test_part_result);
+
+GoogleTestFailureException::GoogleTestFailureException(
+ const TestPartResult& failure)
+ : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {}
+
+#endif // GTEST_HAS_EXCEPTIONS
+
+// We put these helper functions in the internal namespace as IBM's xlC
+// compiler rejects the code if they were declared static.
+
+// Runs the given method and handles SEH exceptions it throws, when
+// SEH is supported; returns the 0-value for type Result in case of an
+// SEH exception. (Microsoft compilers cannot handle SEH and C++
+// exceptions in the same function. Therefore, we provide a separate
+// wrapper function for handling SEH exceptions.)
+template <class T, typename Result>
+Result HandleSehExceptionsInMethodIfSupported(
+ T* object, Result (T::*method)(), const char* location) {
+#if GTEST_HAS_SEH
+ __try {
+ return (object->*method)();
+ } __except (internal::UnitTestOptions::GTestShouldProcessSEH( // NOLINT
+ GetExceptionCode())) {
+ // We create the exception message on the heap because VC++ prohibits
+ // creation of objects with destructors on stack in functions using __try
+ // (see error C2712).
+ std::string* exception_message = FormatSehExceptionMessage(
+ GetExceptionCode(), location);
+ internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
+ *exception_message);
+ delete exception_message;
+ return static_cast<Result>(0);
+ }
+#else
+ (void)location;
+ return (object->*method)();
+#endif // GTEST_HAS_SEH
+}
+
+// Runs the given method and catches and reports C++ and/or SEH-style
+// exceptions, if they are supported; returns the 0-value for type
+// Result in case of an SEH exception.
+template <class T, typename Result>
+Result HandleExceptionsInMethodIfSupported(
+ T* object, Result (T::*method)(), const char* location) {
+ // NOTE: The user code can affect the way in which Google Test handles
+ // exceptions by setting GTEST_FLAG(catch_exceptions), but only before
+ // RUN_ALL_TESTS() starts. It is technically possible to check the flag
+ // after the exception is caught and either report or re-throw the
+ // exception based on the flag's value:
+ //
+ // try {
+ // // Perform the test method.
+ // } catch (...) {
+ // if (GTEST_FLAG(catch_exceptions))
+ // // Report the exception as failure.
+ // else
+ // throw; // Re-throws the original exception.
+ // }
+ //
+ // However, the purpose of this flag is to allow the program to drop into
+ // the debugger when the exception is thrown. On most platforms, once the
+ // control enters the catch block, the exception origin information is
+ // lost and the debugger will stop the program at the point of the
+ // re-throw in this function -- instead of at the point of the original
+ // throw statement in the code under test. For this reason, we perform
+ // the check early, sacrificing the ability to affect Google Test's
+ // exception handling in the method where the exception is thrown.
+ if (internal::GetUnitTestImpl()->catch_exceptions()) {
+#if GTEST_HAS_EXCEPTIONS
+ try {
+ return HandleSehExceptionsInMethodIfSupported(object, method, location);
+ } catch (const internal::GoogleTestFailureException&) { // NOLINT
+ // This exception type can only be thrown by a failed Google
+ // Test assertion with the intention of letting another testing
+ // framework catch it. Therefore we just re-throw it.
+ throw;
+ } catch (const std::exception& e) { // NOLINT
+ internal::ReportFailureInUnknownLocation(
+ TestPartResult::kFatalFailure,
+ FormatCxxExceptionMessage(e.what(), location));
+ } catch (...) { // NOLINT
+ internal::ReportFailureInUnknownLocation(
+ TestPartResult::kFatalFailure,
+ FormatCxxExceptionMessage(NULL, location));
+ }
+ return static_cast<Result>(0);
+#else
+ return HandleSehExceptionsInMethodIfSupported(object, method, location);
+#endif // GTEST_HAS_EXCEPTIONS
+ } else {
+ return (object->*method)();
+ }
+}
+
+} // namespace internal
+
+// Runs the test and updates the test result.
+void Test::Run() {
+ if (!HasSameFixtureClass()) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()");
+ // We will run the test only if SetUp() was successful.
+ if (!HasFatalFailure()) {
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &Test::TestBody, "the test body");
+ }
+
+ // However, we want to clean up as much as possible. Hence we will
+ // always call TearDown(), even if SetUp() or the test body has
+ // failed.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &Test::TearDown, "TearDown()");
+}
+
+// Returns true iff the current test has a fatal failure.
+bool Test::HasFatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure();
+}
+
+// Returns true iff the current test has a non-fatal failure.
+bool Test::HasNonfatalFailure() {
+ return internal::GetUnitTestImpl()->current_test_result()->
+ HasNonfatalFailure();
+}
+
+// class TestInfo
+
+// Constructs a TestInfo object. It assumes ownership of the test factory
+// object.
+TestInfo::TestInfo(const std::string& a_test_case_name,
+ const std::string& a_name,
+ const char* a_type_param,
+ const char* a_value_param,
+ internal::CodeLocation a_code_location,
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory)
+ : test_case_name_(a_test_case_name),
+ name_(a_name),
+ type_param_(a_type_param ? new std::string(a_type_param) : NULL),
+ value_param_(a_value_param ? new std::string(a_value_param) : NULL),
+ location_(a_code_location),
+ fixture_class_id_(fixture_class_id),
+ should_run_(false),
+ is_disabled_(false),
+ matches_filter_(false),
+ factory_(factory),
+ result_() {}
+
+// Destructs a TestInfo object.
+TestInfo::~TestInfo() { delete factory_; }
+
+namespace internal {
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// name: name of the test
+// type_param: the name of the test's type parameter, or NULL if
+// this is not a typed or a type-parameterized test.
+// value_param: text representation of the test's value parameter,
+// or NULL if this is not a value-parameterized test.
+// code_location: code location where the test is defined
+// fixture_class_id: ID of the test fixture class
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+// factory: pointer to the factory that creates a test object.
+// The newly created TestInfo instance will assume
+// ownership of the factory object.
+TestInfo* MakeAndRegisterTestInfo(
+ const char* test_case_name,
+ const char* name,
+ const char* type_param,
+ const char* value_param,
+ CodeLocation code_location,
+ TypeId fixture_class_id,
+ SetUpTestCaseFunc set_up_tc,
+ TearDownTestCaseFunc tear_down_tc,
+ TestFactoryBase* factory) {
+ TestInfo* const test_info =
+ new TestInfo(test_case_name, name, type_param, value_param,
+ code_location, fixture_class_id, factory);
+ GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info);
+ return test_info;
+}
+
+#if GTEST_HAS_PARAM_TEST
+void ReportInvalidTestCaseType(const char* test_case_name,
+ CodeLocation code_location) {
+ Message errors;
+ errors
+ << "Attempted redefinition of test case " << test_case_name << ".\n"
+ << "All tests in the same test case must use the same test fixture\n"
+ << "class. However, in test case " << test_case_name << ", you tried\n"
+ << "to define a test using a fixture class different from the one\n"
+ << "used earlier. This can happen if the two fixture classes are\n"
+ << "from different namespaces and have the same name. You should\n"
+ << "probably rename one of the classes to put the tests into different\n"
+ << "test cases.";
+
+ fprintf(stderr, "%s %s",
+ FormatFileLocation(code_location.file.c_str(),
+ code_location.line).c_str(),
+ errors.GetString().c_str());
+}
+#endif // GTEST_HAS_PARAM_TEST
+
+} // namespace internal
+
+namespace {
+
+// A predicate that checks the test name of a TestInfo against a known
+// value.
+//
+// This is used for implementation of the TestCase class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestNameIs is copyable.
+class TestNameIs {
+ public:
+ // Constructor.
+ //
+ // TestNameIs has NO default constructor.
+ explicit TestNameIs(const char* name)
+ : name_(name) {}
+
+ // Returns true iff the test name of test_info matches name_.
+ bool operator()(const TestInfo * test_info) const {
+ return test_info && test_info->name() == name_;
+ }
+
+ private:
+ std::string name_;
+};
+
+} // namespace
+
+namespace internal {
+
+// This method expands all parameterized tests registered with macros TEST_P
+// and INSTANTIATE_TEST_CASE_P into regular tests and registers those.
+// This will be done just once during the program runtime.
+void UnitTestImpl::RegisterParameterizedTests() {
+#if GTEST_HAS_PARAM_TEST
+ if (!parameterized_tests_registered_) {
+ parameterized_test_registry_.RegisterTests();
+ parameterized_tests_registered_ = true;
+ }
+#endif  // GTEST_HAS_PARAM_TEST
+}
+
+} // namespace internal
+
+// Creates the test object, runs it, records its result, and then
+// deletes it.
+void TestInfo::Run() {
+ if (!should_run_) return;
+
+ // Tells UnitTest where to store test result.
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_info(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ // Notifies the unit test event listeners that a test is about to start.
+ repeater->OnTestStart(*this);
+
+ const TimeInMillis start = internal::GetTimeInMillis();
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+
+ // Creates the test object.
+ Test* const test = internal::HandleExceptionsInMethodIfSupported(
+ factory_, &internal::TestFactoryBase::CreateTest,
+ "the test fixture's constructor");
+
+ // Runs the test only if the test object was created and its
+ // constructor didn't generate a fatal failure.
+ if ((test != NULL) && !Test::HasFatalFailure()) {
+    // This doesn't throw, as all user code that can throw is wrapped in
+    // exception handling code.
+ test->Run();
+ }
+
+ // Deletes the test object.
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ test, &Test::DeleteSelf_, "the test fixture's destructor");
+
+ result_.set_elapsed_time(internal::GetTimeInMillis() - start);
+
+ // Notifies the unit test event listener that a test has just finished.
+ repeater->OnTestEnd(*this);
+
+ // Tells UnitTest to stop associating assertion results to this
+ // test.
+ impl->set_current_test_info(NULL);
+}
+
+// class TestCase
+
+// Gets the number of successful tests in this test case.
+int TestCase::successful_test_count() const {
+ return CountIf(test_info_list_, TestPassed);
+}
+
+// Gets the number of failed tests in this test case.
+int TestCase::failed_test_count() const {
+ return CountIf(test_info_list_, TestFailed);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int TestCase::reportable_disabled_test_count() const {
+ return CountIf(test_info_list_, TestReportableDisabled);
+}
+
+// Gets the number of disabled tests in this test case.
+int TestCase::disabled_test_count() const {
+ return CountIf(test_info_list_, TestDisabled);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int TestCase::reportable_test_count() const {
+ return CountIf(test_info_list_, TestReportable);
+}
+
+// Get the number of tests in this test case that should run.
+int TestCase::test_to_run_count() const {
+ return CountIf(test_info_list_, ShouldRunTest);
+}
+
+// Gets the number of all tests.
+int TestCase::total_test_count() const {
+ return static_cast<int>(test_info_list_.size());
+}
+
+// Creates a TestCase with the given name.
+//
+// Arguments:
+//
+// name: name of the test case
+// a_type_param: the name of the test case's type parameter, or NULL if
+// this is not a typed or a type-parameterized test case.
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+TestCase::TestCase(const char* a_name, const char* a_type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc)
+ : name_(a_name),
+ type_param_(a_type_param ? new std::string(a_type_param) : NULL),
+ set_up_tc_(set_up_tc),
+ tear_down_tc_(tear_down_tc),
+ should_run_(false),
+ elapsed_time_(0) {
+}
+
+// Destructor of TestCase.
+TestCase::~TestCase() {
+ // Deletes every Test in the collection.
+ ForEach(test_info_list_, internal::Delete<TestInfo>);
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+const TestInfo* TestCase::GetTestInfo(int i) const {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+TestInfo* TestCase::GetMutableTestInfo(int i) {
+ const int index = GetElementOr(test_indices_, i, -1);
+ return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Adds a test to this test case. Will delete the test upon
+// destruction of the TestCase object.
+void TestCase::AddTestInfo(TestInfo * test_info) {
+ test_info_list_.push_back(test_info);
+ test_indices_.push_back(static_cast<int>(test_indices_.size()));
+}
+
+// Runs every test in this TestCase.
+void TestCase::Run() {
+ if (!should_run_) return;
+
+ internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+ impl->set_current_test_case(this);
+
+ TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+ repeater->OnTestCaseStart(*this);
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &TestCase::RunSetUpTestCase, "SetUpTestCase()");
+
+ const internal::TimeInMillis start = internal::GetTimeInMillis();
+ for (int i = 0; i < total_test_count(); i++) {
+ GetMutableTestInfo(i)->Run();
+ }
+ elapsed_time_ = internal::GetTimeInMillis() - start;
+
+ impl->os_stack_trace_getter()->UponLeavingGTest();
+ internal::HandleExceptionsInMethodIfSupported(
+ this, &TestCase::RunTearDownTestCase, "TearDownTestCase()");
+
+ repeater->OnTestCaseEnd(*this);
+ impl->set_current_test_case(NULL);
+}
+
+// Clears the results of all tests in this test case.
+void TestCase::ClearResult() {
+ ad_hoc_test_result_.Clear();
+ ForEach(test_info_list_, TestInfo::ClearTestResult);
+}
+
+// Shuffles the tests in this test case.
+void TestCase::ShuffleTests(internal::Random* random) {
+ Shuffle(random, &test_indices_);
+}
+
+// Restores the test order to before the first shuffle.
+void TestCase::UnshuffleTests() {
+ for (size_t i = 0; i < test_indices_.size(); i++) {
+ test_indices_[i] = static_cast<int>(i);
+ }
+}
+
+// Formats a countable noun. Depending on its quantity, either the
+// singular form or the plural form is used. e.g.
+//
+//   FormatCountableNoun(1, "formula", "formulae") returns "1 formula".
+// FormatCountableNoun(5, "book", "books") returns "5 books".
+static std::string FormatCountableNoun(int count,
+ const char * singular_form,
+ const char * plural_form) {
+ return internal::StreamableToString(count) + " " +
+ (count == 1 ? singular_form : plural_form);
+}
+
+// Formats the count of tests.
+static std::string FormatTestCount(int test_count) {
+ return FormatCountableNoun(test_count, "test", "tests");
+}
+
+// Formats the count of test cases.
+static std::string FormatTestCaseCount(int test_case_count) {
+ return FormatCountableNoun(test_case_count, "test case", "test cases");
+}
+
+// Converts a TestPartResult::Type enum to human-friendly string
+// representation. Both kNonFatalFailure and kFatalFailure are translated
+// to "Failure", as the user usually doesn't care about the difference
+// between the two when viewing the test result.
+static const char * TestPartResultTypeToString(TestPartResult::Type type) {
+ switch (type) {
+ case TestPartResult::kSuccess:
+ return "Success";
+
+ case TestPartResult::kNonFatalFailure:
+ case TestPartResult::kFatalFailure:
+#ifdef _MSC_VER
+ return "error: ";
+#else
+ return "Failure\n";
+#endif
+ default:
+ return "Unknown result type";
+ }
+}
+
+namespace internal {
+
+// Prints a TestPartResult to an std::string.
+static std::string PrintTestPartResultToString(
+ const TestPartResult& test_part_result) {
+ return (Message()
+ << internal::FormatFileLocation(test_part_result.file_name(),
+ test_part_result.line_number())
+ << " " << TestPartResultTypeToString(test_part_result.type())
+ << test_part_result.message()).GetString();
+}
+
+// Prints a TestPartResult.
+static void PrintTestPartResult(const TestPartResult& test_part_result) {
+ const std::string& result =
+ PrintTestPartResultToString(test_part_result);
+ printf("%s\n", result.c_str());
+ fflush(stdout);
+ // If the test program runs in Visual Studio or a debugger, the
+ // following statements add the test part result message to the Output
+ // window such that the user can double-click on it to jump to the
+ // corresponding source code location; otherwise they do nothing.
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ // We don't call OutputDebugString*() on Windows Mobile, as printing
+ // to stdout is done by OutputDebugString() there already - we don't
+ // want the same message printed twice.
+ ::OutputDebugStringA(result.c_str());
+ ::OutputDebugStringA("\n");
+#endif
+}
+
+// class PrettyUnitTestResultPrinter
+
+enum GTestColor {
+ COLOR_DEFAULT,
+ COLOR_RED,
+ COLOR_GREEN,
+ COLOR_YELLOW
+};
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
+ !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+
+// Returns the character attribute for the given color.
+WORD GetColorAttribute(GTestColor color) {
+ switch (color) {
+ case COLOR_RED: return FOREGROUND_RED;
+ case COLOR_GREEN: return FOREGROUND_GREEN;
+ case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN;
+ default: return 0;
+ }
+}
+
+#else
+
+// Returns the ANSI color code for the given color. COLOR_DEFAULT is
+// an invalid input.
+const char* GetAnsiColorCode(GTestColor color) {
+ switch (color) {
+ case COLOR_RED: return "1";
+ case COLOR_GREEN: return "2";
+ case COLOR_YELLOW: return "3";
+ default: return NULL;
+  }
+}
+
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+
+// Returns true iff Google Test should use colors in the output.
+bool ShouldUseColor(bool stdout_is_tty) {
+ const char* const gtest_color = GTEST_FLAG(color).c_str();
+
+ if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) {
+#if GTEST_OS_WINDOWS
+ // On Windows the TERM variable is usually not set, but the
+ // console there does support colors.
+ return stdout_is_tty;
+#else
+ // On non-Windows platforms, we rely on the TERM variable.
+ const char* const term = posix::GetEnv("TERM");
+ const bool term_supports_color =
+ String::CStringEquals(term, "xterm") ||
+ String::CStringEquals(term, "xterm-color") ||
+ String::CStringEquals(term, "xterm-256color") ||
+ String::CStringEquals(term, "screen") ||
+ String::CStringEquals(term, "screen-256color") ||
+ String::CStringEquals(term, "tmux") ||
+ String::CStringEquals(term, "tmux-256color") ||
+ String::CStringEquals(term, "rxvt-unicode") ||
+ String::CStringEquals(term, "rxvt-unicode-256color") ||
+ String::CStringEquals(term, "linux") ||
+ String::CStringEquals(term, "cygwin");
+ return stdout_is_tty && term_supports_color;
+#endif // GTEST_OS_WINDOWS
+ }
+
+  // We take "yes", "true", "t", and "1" as meaning "yes". If the
+  // value is neither one of these nor "auto", we treat it as "no" to
+  // be conservative.
+  return String::CaseInsensitiveCStringEquals(gtest_color, "yes") ||
+      String::CaseInsensitiveCStringEquals(gtest_color, "true") ||
+      String::CaseInsensitiveCStringEquals(gtest_color, "t") ||
+      String::CStringEquals(gtest_color, "1");
+}
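+
+// For example, "--gtest_color=yes" forces colored output even when
+// stdout is redirected, while "--gtest_color=auto" (the default)
+// enables it only for a color-capable terminal.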
+
+// Helpers for printing colored strings to stdout. Note that on Windows, we
+// cannot simply emit special characters and have the terminal change colors.
+// This routine must actually emit the characters rather than return a string
+// that would be colored when printed, as can be done on Linux.
+void ColoredPrintf(GTestColor color, const char* fmt, ...) {
+ va_list args;
+ va_start(args, fmt);
+
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || \
+ GTEST_OS_IOS || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+ const bool use_color = AlwaysFalse();
+#else
+  // The '!= 0' comparison is necessary to satisfy MSVC 7.1.
+  static const bool in_color_mode =
+      ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0);
+  const bool use_color = in_color_mode && (color != COLOR_DEFAULT);
+#endif  // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS
+
+ if (!use_color) {
+ vprintf(fmt, args);
+ va_end(args);
+ return;
+ }
+
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \
+ !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+ const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE);
+
+ // Gets the current text color.
+ CONSOLE_SCREEN_BUFFER_INFO buffer_info;
+ GetConsoleScreenBufferInfo(stdout_handle, &buffer_info);
+ const WORD old_color_attrs = buffer_info.wAttributes;
+
+ // We need to flush the stream buffers into the console before each
+ // SetConsoleTextAttribute call lest it affect the text that is already
+ // printed but has not yet reached the console.
+ fflush(stdout);
+ SetConsoleTextAttribute(stdout_handle,
+ GetColorAttribute(color) | FOREGROUND_INTENSITY);
+ vprintf(fmt, args);
+
+ fflush(stdout);
+ // Restores the text color.
+ SetConsoleTextAttribute(stdout_handle, old_color_attrs);
+#else
+ printf("\033[0;3%sm", GetAnsiColorCode(color));
+ vprintf(fmt, args);
+ printf("\033[m"); // Resets the terminal to default.
+#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE
+ va_end(args);
+}
+
+// Text printed in Google Test's text output and --gtest_list_tests
+// output to label the type parameter and value parameter for a test.
+static const char kTypeParamLabel[] = "TypeParam";
+static const char kValueParamLabel[] = "GetParam()";
+
+void PrintFullTestCommentIfPresent(const TestInfo& test_info) {
+ const char* const type_param = test_info.type_param();
+ const char* const value_param = test_info.value_param();
+
+ if (type_param != NULL || value_param != NULL) {
+ printf(", where ");
+ if (type_param != NULL) {
+ printf("%s = %s", kTypeParamLabel, type_param);
+ if (value_param != NULL)
+ printf(" and ");
+ }
+ if (value_param != NULL) {
+ printf("%s = %s", kValueParamLabel, value_param);
+ }
+ }
+}
+
+// This class implements the TestEventListener interface.
+//
+// Class PrettyUnitTestResultPrinter is copyable.
+class PrettyUnitTestResultPrinter : public TestEventListener {
+ public:
+ PrettyUnitTestResultPrinter() {}
+ static void PrintTestName(const char * test_case, const char * test) {
+ printf("%s.%s", test_case, test);
+ }
+
+ // The following methods override what's in the TestEventListener class.
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestCaseStart(const TestCase& test_case);
+ virtual void OnTestStart(const TestInfo& test_info);
+ virtual void OnTestPartResult(const TestPartResult& result);
+ virtual void OnTestEnd(const TestInfo& test_info);
+ virtual void OnTestCaseEnd(const TestCase& test_case);
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {}
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+ virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {}
+
+ private:
+ static void PrintFailedTests(const UnitTest& unit_test);
+};
+
+// Fired before each iteration of tests starts.
+void PrettyUnitTestResultPrinter::OnTestIterationStart(
+ const UnitTest& unit_test, int iteration) {
+ if (GTEST_FLAG(repeat) != 1)
+ printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1);
+
+ const char* const filter = GTEST_FLAG(filter).c_str();
+
+ // Prints the filter if it's not *. This reminds the user that some
+ // tests may be skipped.
+ if (!String::CStringEquals(filter, kUniversalFilter)) {
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: %s filter = %s\n", GTEST_NAME_, filter);
+ }
+
+ if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) {
+ const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1);
+ ColoredPrintf(COLOR_YELLOW,
+ "Note: This is test shard %d of %s.\n",
+ static_cast<int>(shard_index) + 1,
+ internal::posix::GetEnv(kTestTotalShards));
+ }
+
+ if (GTEST_FLAG(shuffle)) {
+ ColoredPrintf(COLOR_YELLOW,
+                  "Note: Randomizing tests' orders with a seed of %d.\n",
+ unit_test.random_seed());
+ }
+
+ ColoredPrintf(COLOR_GREEN, "[==========] ");
+ printf("Running %s from %s.\n",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart(
+ const UnitTest& /*unit_test*/) {
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("Global test environment set-up.\n");
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) {
+ const std::string counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s", counts.c_str(), test_case.name());
+ if (test_case.type_param() == NULL) {
+ printf("\n");
+ } else {
+ printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param());
+ }
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) {
+ ColoredPrintf(COLOR_GREEN, "[ RUN ] ");
+ PrintTestName(test_info.test_case_name(), test_info.name());
+ printf("\n");
+ fflush(stdout);
+}
+
+// Called after an assertion failure.
+void PrettyUnitTestResultPrinter::OnTestPartResult(
+ const TestPartResult& result) {
+ // If the test part succeeded, we don't need to do anything.
+ if (result.type() == TestPartResult::kSuccess)
+ return;
+
+ // Print failure message from the assertion (e.g. expected this and got that).
+ PrintTestPartResult(result);
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
+ if (test_info.result()->Passed()) {
+ ColoredPrintf(COLOR_GREEN, "[ OK ] ");
+ } else {
+ ColoredPrintf(COLOR_RED, "[ FAILED ] ");
+ }
+ PrintTestName(test_info.test_case_name(), test_info.name());
+ if (test_info.result()->Failed())
+ PrintFullTestCommentIfPresent(test_info);
+
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms)\n", internal::StreamableToString(
+ test_info.result()->elapsed_time()).c_str());
+ } else {
+ printf("\n");
+ }
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
+ if (!GTEST_FLAG(print_time)) return;
+
+ const std::string counts =
+ FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("%s from %s (%s ms total)\n\n",
+ counts.c_str(), test_case.name(),
+ internal::StreamableToString(test_case.elapsed_time()).c_str());
+ fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
+ const UnitTest& /*unit_test*/) {
+ ColoredPrintf(COLOR_GREEN, "[----------] ");
+ printf("Global test environment tear-down\n");
+ fflush(stdout);
+}
+
+// Internal helper for printing the list of failed tests.
+void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
+ const int failed_test_count = unit_test.failed_test_count();
+ if (failed_test_count == 0) {
+ return;
+ }
+
+ for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+ const TestCase& test_case = *unit_test.GetTestCase(i);
+ if (!test_case.should_run() || (test_case.failed_test_count() == 0)) {
+ continue;
+ }
+ for (int j = 0; j < test_case.total_test_count(); ++j) {
+ const TestInfo& test_info = *test_case.GetTestInfo(j);
+ if (!test_info.should_run() || test_info.result()->Passed()) {
+ continue;
+ }
+ ColoredPrintf(COLOR_RED, "[ FAILED ] ");
+ printf("%s.%s", test_case.name(), test_info.name());
+ PrintFullTestCommentIfPresent(test_info);
+ printf("\n");
+ }
+ }
+}
+
+void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ ColoredPrintf(COLOR_GREEN, "[==========] ");
+ printf("%s from %s ran.",
+ FormatTestCount(unit_test.test_to_run_count()).c_str(),
+ FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+ if (GTEST_FLAG(print_time)) {
+ printf(" (%s ms total)",
+ internal::StreamableToString(unit_test.elapsed_time()).c_str());
+ }
+ printf("\n");
+ ColoredPrintf(COLOR_GREEN, "[ PASSED ] ");
+ printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
+
+ int num_failures = unit_test.failed_test_count();
+ if (!unit_test.Passed()) {
+ const int failed_test_count = unit_test.failed_test_count();
+ ColoredPrintf(COLOR_RED, "[ FAILED ] ");
+ printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
+ PrintFailedTests(unit_test);
+ printf("\n%2d FAILED %s\n", num_failures,
+ num_failures == 1 ? "TEST" : "TESTS");
+ }
+
+ int num_disabled = unit_test.reportable_disabled_test_count();
+ if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
+ if (!num_failures) {
+ printf("\n"); // Add a spacer if no FAILURE banner is displayed.
+ }
+ ColoredPrintf(COLOR_YELLOW,
+ " YOU HAVE %d DISABLED %s\n\n",
+ num_disabled,
+ num_disabled == 1 ? "TEST" : "TESTS");
+ }
+ // Ensure that Google Test output is printed before, e.g., heapchecker output.
+ fflush(stdout);
+}
+
+// End PrettyUnitTestResultPrinter
+
+// class TestEventRepeater
+//
+// This class forwards events to other event listeners.
+class TestEventRepeater : public TestEventListener {
+ public:
+ TestEventRepeater() : forwarding_enabled_(true) {}
+ virtual ~TestEventRepeater();
+ void Append(TestEventListener *listener);
+ TestEventListener* Release(TestEventListener* listener);
+
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled() const { return forwarding_enabled_; }
+ void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
+
+ virtual void OnTestProgramStart(const UnitTest& unit_test);
+ virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
+ virtual void OnTestCaseStart(const TestCase& test_case);
+ virtual void OnTestStart(const TestInfo& test_info);
+ virtual void OnTestPartResult(const TestPartResult& result);
+ virtual void OnTestEnd(const TestInfo& test_info);
+ virtual void OnTestCaseEnd(const TestCase& test_case);
+ virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+ virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+ virtual void OnTestProgramEnd(const UnitTest& unit_test);
+
+ private:
+ // Controls whether events will be forwarded to listeners_. Set to false
+ // in death test child processes.
+ bool forwarding_enabled_;
+ // The list of listeners that receive events.
+ std::vector<TestEventListener*> listeners_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
+};
+
+TestEventRepeater::~TestEventRepeater() {
+ ForEach(listeners_, Delete<TestEventListener>);
+}
+
+void TestEventRepeater::Append(TestEventListener *listener) {
+ listeners_.push_back(listener);
+}
+
+// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+ for (size_t i = 0; i < listeners_.size(); ++i) {
+ if (listeners_[i] == listener) {
+ listeners_.erase(listeners_.begin() + i);
+ return listener;
+ }
+ }
+
+ return NULL;
+}
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
+#define GTEST_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (size_t i = 0; i < listeners_.size(); i++) { \
+ listeners_[i]->Name(parameter); \
+ } \
+ } \
+}
+// This defines a member that forwards the call to all listeners in reverse
+// order.
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+ if (forwarding_enabled_) { \
+ for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
+ listeners_[i]->Name(parameter); \
+ } \
+ } \
+}
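+
+// For example, GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) below
+// expands to:
+//
+//   void TestEventRepeater::OnTestStart(const TestInfo& parameter) {
+//     if (forwarding_enabled_) {
+//       for (size_t i = 0; i < listeners_.size(); i++) {
+//         listeners_[i]->OnTestStart(parameter);
+//       }
+//     }
+//   }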
+
+GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
+GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
+GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
+
+#undef GTEST_REPEATER_METHOD_
+#undef GTEST_REVERSE_REPEATER_METHOD_
+
+void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (size_t i = 0; i < listeners_.size(); i++) {
+ listeners_[i]->OnTestIterationStart(unit_test, iteration);
+ }
+ }
+}
+
+void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
+ int iteration) {
+ if (forwarding_enabled_) {
+ for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
+ listeners_[i]->OnTestIterationEnd(unit_test, iteration);
+ }
+ }
+}
+
+// End TestEventRepeater
+
+// This class generates an XML output file.
+class XmlUnitTestResultPrinter : public EmptyTestEventListener {
+ public:
+ explicit XmlUnitTestResultPrinter(const char* output_file);
+
+ virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+
+ private:
+ // Is c a whitespace character that is normalized to a space character
+ // when it appears in an XML attribute value?
+ static bool IsNormalizableWhitespace(char c) {
+ return c == 0x9 || c == 0xA || c == 0xD;
+ }
+
+ // May c appear in a well-formed XML document?
+ static bool IsValidXmlCharacter(char c) {
+ return IsNormalizableWhitespace(c) || c >= 0x20;
+ }
+
+ // Returns an XML-escaped copy of the input string str. If
+ // is_attribute is true, the text is meant to appear as an attribute
+ // value, and normalizable whitespace is preserved by replacing it
+ // with character references.
+ static std::string EscapeXml(const std::string& str, bool is_attribute);
+
+ // Returns the given string with all characters invalid in XML removed.
+ static std::string RemoveInvalidXmlCharacters(const std::string& str);
+
+ // Convenience wrapper around EscapeXml when str is an attribute value.
+ static std::string EscapeXmlAttribute(const std::string& str) {
+ return EscapeXml(str, true);
+ }
+
+ // Convenience wrapper around EscapeXml when str is not an attribute value.
+ static std::string EscapeXmlText(const char* str) {
+ return EscapeXml(str, false);
+ }
+
+ // Verifies that the given attribute belongs to the given element and
+ // streams the attribute as XML.
+ static void OutputXmlAttribute(std::ostream* stream,
+ const std::string& element_name,
+ const std::string& name,
+ const std::string& value);
+
+ // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+ static void OutputXmlCDataSection(::std::ostream* stream, const char* data);
+
+ // Streams an XML representation of a TestInfo object.
+ static void OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_case_name,
+ const TestInfo& test_info);
+
+  // Prints an XML representation of a TestCase object.
+ static void PrintXmlTestCase(::std::ostream* stream,
+ const TestCase& test_case);
+
+ // Prints an XML summary of unit_test to output stream out.
+ static void PrintXmlUnitTest(::std::ostream* stream,
+ const UnitTest& unit_test);
+
+ // Produces a string representing the test properties in a result as space
+ // delimited XML attributes based on the property key="value" pairs.
+ // When the std::string is not empty, it includes a space at the beginning,
+ // to delimit this attribute from prior attributes.
+ static std::string TestPropertiesAsXmlAttributes(const TestResult& result);
+
+ // The output file.
+ const std::string output_file_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter);
+};
+
+// Creates a new XmlUnitTestResultPrinter.
+XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file)
+ : output_file_(output_file) {
+  if (output_file_.empty()) {
+    fprintf(stderr, "XML output file may not be empty\n");
+ fflush(stderr);
+ exit(EXIT_FAILURE);
+ }
+}
+
+// Called after the unit test ends.
+void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+ int /*iteration*/) {
+ FILE* xmlout = NULL;
+ FilePath output_file(output_file_);
+ FilePath output_dir(output_file.RemoveFileName());
+
+ if (output_dir.CreateDirectoriesRecursively()) {
+ xmlout = posix::FOpen(output_file_.c_str(), "w");
+ }
+ if (xmlout == NULL) {
+ // TODO(wan): report the reason of the failure.
+ //
+ // We don't do it for now as:
+ //
+ // 1. There is no urgent need for it.
+ // 2. It's a bit involved to make the errno variable thread-safe on
+ // all three operating systems (Linux, Windows, and Mac OS).
+ // 3. To interpret the meaning of errno in a thread-safe way,
+ // we need the strerror_r() function, which is not available on
+ // Windows.
+ fprintf(stderr,
+ "Unable to open file \"%s\"\n",
+ output_file_.c_str());
+ fflush(stderr);
+ exit(EXIT_FAILURE);
+ }
+ std::stringstream stream;
+ PrintXmlUnitTest(&stream, unit_test);
+ fprintf(xmlout, "%s", StringStreamToString(&stream).c_str());
+ fclose(xmlout);
+}
+
+// Returns an XML-escaped copy of the input string str. If is_attribute
+// is true, the text is meant to appear as an attribute value, and
+// normalizable whitespace is preserved by replacing it with character
+// references.
+//
+// Invalid XML characters in str, if any, are stripped from the output.
+// It is expected that most, if not all, of the text processed by this
+// module will consist of ordinary English text.
+// If this module is ever modified to produce version 1.1 XML output,
+// most invalid characters can be retained using character references.
+// TODO(wan): It might be nice to have a minimally invasive, human-readable
+// escaping scheme for invalid characters, rather than dropping them.
+std::string XmlUnitTestResultPrinter::EscapeXml(
+ const std::string& str, bool is_attribute) {
+ Message m;
+
+ for (size_t i = 0; i < str.size(); ++i) {
+ const char ch = str[i];
+ switch (ch) {
+ case '<':
+ m << "&lt;";
+ break;
+ case '>':
+ m << "&gt;";
+ break;
+ case '&':
+ m << "&amp;";
+ break;
+ case '\'':
+ if (is_attribute)
+ m << "&apos;";
+ else
+ m << '\'';
+ break;
+ case '"':
+ if (is_attribute)
+ m << "&quot;";
+ else
+ m << '"';
+ break;
+ default:
+ if (IsValidXmlCharacter(ch)) {
+ if (is_attribute && IsNormalizableWhitespace(ch))
+ m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
+ << ";";
+ else
+ m << ch;
+ }
+ break;
+ }
+ }
+
+ return m.GetString();
+}
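+
+// For example, EscapeXml("a<b & 'c'", true) returns
+// "a&lt;b &amp; &apos;c&apos;"; with is_attribute == false the single
+// quotes would be left as-is.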
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
+ const std::string& str) {
+ std::string output;
+ output.reserve(str.size());
+ for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
+ if (IsValidXmlCharacter(*it))
+ output.push_back(*it);
+
+ return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests"> <-- corresponds to a UnitTest object
+// <testsuite name="testcase-name"> <-- corresponds to a TestCase object
+// <testcase name="test-name"> <-- corresponds to a TestInfo object
+// <failure message="...">...</failure>
+// <failure message="...">...</failure>
+// <failure message="...">...</failure>
+// <-- individual assertion failures
+// </testcase>
+// </testsuite>
+// </testsuites>
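+//
+// For a run with a single passing test, the printer below therefore emits
+// a report shaped roughly like this (timestamp and time values vary):
+//
+//   <?xml version="1.0" encoding="UTF-8"?>
+//   <testsuites tests="1" failures="0" disabled="0" errors="0"
+//               timestamp="..." time="..." name="AllTests">
+//     <testsuite name="FooTest" tests="1" failures="0" disabled="0"
+//                errors="0" time="...">
+//       <testcase name="Bar" status="run" time="..." classname="FooTest" />
+//     </testsuite>
+//   </testsuites>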
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+ ::std::stringstream ss;
+ ss << (static_cast<double>(ms) * 1e-3);
+ return ss.str();
+}
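+
+// For example, FormatTimeInMillisAsSeconds(1500) returns "1.5" and
+// FormatTimeInMillisAsSeconds(3) returns "0.003".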
+
+static bool PortableLocaltime(time_t seconds, struct tm* out) {
+#if defined(_MSC_VER)
+ return localtime_s(out, &seconds) == 0;
+#elif defined(__MINGW32__) || defined(__MINGW64__)
+ // MINGW <time.h> provides neither localtime_r nor localtime_s, but uses
+ // Windows' localtime(), which has a thread-local tm buffer.
+ struct tm* tm_ptr = localtime(&seconds); // NOLINT
+ if (tm_ptr == NULL)
+ return false;
+ *out = *tm_ptr;
+ return true;
+#else
+ return localtime_r(&seconds, out) != NULL;
+#endif
+}
+
+// Converts the given epoch time in milliseconds to a date string in the ISO
+// 8601 format, without the timezone information.
+std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
+ struct tm time_struct;
+ if (!PortableLocaltime(static_cast<time_t>(ms / 1000), &time_struct))
+ return "";
+ // YYYY-MM-DDThh:mm:ss
+ return StreamableToString(time_struct.tm_year + 1900) + "-" +
+ String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" +
+ String::FormatIntWidth2(time_struct.tm_mday) + "T" +
+ String::FormatIntWidth2(time_struct.tm_hour) + ":" +
+ String::FormatIntWidth2(time_struct.tm_min) + ":" +
+ String::FormatIntWidth2(time_struct.tm_sec);
+}
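+
+// For example, an input of 0 yields "1970-01-01T00:00:00" when the local
+// time zone is UTC; since the conversion goes through localtime, the exact
+// result depends on the machine's time zone.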
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+ const char* data) {
+ const char* segment = data;
+ *stream << "<![CDATA[";
+ for (;;) {
+ const char* const next_segment = strstr(segment, "]]>");
+ if (next_segment != NULL) {
+ stream->write(
+ segment, static_cast<std::streamsize>(next_segment - segment));
+ *stream << "]]>]]&gt;<![CDATA[";
+ segment = next_segment + strlen("]]>");
+ } else {
+ *stream << segment;
+ break;
+ }
+ }
+ *stream << "]]>";
+}
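+
+// For example, data of "foo]]>bar" is emitted as
+// "<![CDATA[foo]]>]]&gt;<![CDATA[bar]]>", so a literal "]]>" in the data
+// cannot prematurely terminate the CDATA section.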
+
+void XmlUnitTestResultPrinter::OutputXmlAttribute(
+ std::ostream* stream,
+ const std::string& element_name,
+ const std::string& name,
+ const std::string& value) {
+ const std::vector<std::string>& allowed_names =
+ GetReservedAttributesForElement(element_name);
+
+ GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
+ allowed_names.end())
+ << "Attribute " << name << " is not allowed for element <" << element_name
+ << ">.";
+
+ *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\"";
+}
+
+// Prints an XML representation of a TestInfo object.
+// TODO(wan): There is also value in printing properties with the plain printer.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+ const char* test_case_name,
+ const TestInfo& test_info) {
+ const TestResult& result = *test_info.result();
+ const std::string kTestcase = "testcase";
+
+ *stream << " <testcase";
+ OutputXmlAttribute(stream, kTestcase, "name", test_info.name());
+
+ if (test_info.value_param() != NULL) {
+ OutputXmlAttribute(stream, kTestcase, "value_param",
+ test_info.value_param());
+ }
+ if (test_info.type_param() != NULL) {
+ OutputXmlAttribute(stream, kTestcase, "type_param", test_info.type_param());
+ }
+
+ OutputXmlAttribute(stream, kTestcase, "status",
+ test_info.should_run() ? "run" : "notrun");
+ OutputXmlAttribute(stream, kTestcase, "time",
+ FormatTimeInMillisAsSeconds(result.elapsed_time()));
+ OutputXmlAttribute(stream, kTestcase, "classname", test_case_name);
+ *stream << TestPropertiesAsXmlAttributes(result);
+
+ int failures = 0;
+ for (int i = 0; i < result.total_part_count(); ++i) {
+ const TestPartResult& part = result.GetTestPartResult(i);
+ if (part.failed()) {
+ if (++failures == 1) {
+ *stream << ">\n";
+ }
+ const string location = internal::FormatCompilerIndependentFileLocation(
+ part.file_name(), part.line_number());
+ const string summary = location + "\n" + part.summary();
+ *stream << " <failure message=\""
+              << EscapeXmlAttribute(summary)
+ << "\" type=\"\">";
+ const string detail = location + "\n" + part.message();
+ OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
+ *stream << "</failure>\n";
+ }
+ }
+
+ if (failures == 0)
+ *stream << " />\n";
+ else
+ *stream << " </testcase>\n";
+}
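+
+// For a failing test, the <testcase> element above is emitted in its open
+// form, roughly:
+//
+//   <testcase name="Bar" status="run" time="..." classname="FooTest">
+//     <failure message="..." type=""><![CDATA[...]]></failure>
+//   </testcase>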
+
+// Prints an XML representation of a TestCase object.
+void XmlUnitTestResultPrinter::PrintXmlTestCase(std::ostream* stream,
+ const TestCase& test_case) {
+ const std::string kTestsuite = "testsuite";
+ *stream << " <" << kTestsuite;
+ OutputXmlAttribute(stream, kTestsuite, "name", test_case.name());
+ OutputXmlAttribute(stream, kTestsuite, "tests",
+ StreamableToString(test_case.reportable_test_count()));
+ OutputXmlAttribute(stream, kTestsuite, "failures",
+ StreamableToString(test_case.failed_test_count()));
+ OutputXmlAttribute(
+ stream, kTestsuite, "disabled",
+ StreamableToString(test_case.reportable_disabled_test_count()));
+ OutputXmlAttribute(stream, kTestsuite, "errors", "0");
+ OutputXmlAttribute(stream, kTestsuite, "time",
+ FormatTimeInMillisAsSeconds(test_case.elapsed_time()));
+ *stream << TestPropertiesAsXmlAttributes(test_case.ad_hoc_test_result())
+ << ">\n";
+
+ for (int i = 0; i < test_case.total_test_count(); ++i) {
+ if (test_case.GetTestInfo(i)->is_reportable())
+ OutputXmlTestInfo(stream, test_case.name(), *test_case.GetTestInfo(i));
+ }
+ *stream << " </" << kTestsuite << ">\n";
+}
+
+// Prints an XML summary of unit_test to output stream out.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
+ const UnitTest& unit_test) {
+ const std::string kTestsuites = "testsuites";
+
+ *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
+ *stream << "<" << kTestsuites;
+
+ OutputXmlAttribute(stream, kTestsuites, "tests",
+ StreamableToString(unit_test.reportable_test_count()));
+ OutputXmlAttribute(stream, kTestsuites, "failures",
+ StreamableToString(unit_test.failed_test_count()));
+ OutputXmlAttribute(
+ stream, kTestsuites, "disabled",
+ StreamableToString(unit_test.reportable_disabled_test_count()));
+ OutputXmlAttribute(stream, kTestsuites, "errors", "0");
+ OutputXmlAttribute(
+ stream, kTestsuites, "timestamp",
+ FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
+ OutputXmlAttribute(stream, kTestsuites, "time",
+ FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
+
+ if (GTEST_FLAG(shuffle)) {
+ OutputXmlAttribute(stream, kTestsuites, "random_seed",
+ StreamableToString(unit_test.random_seed()));
+ }
+
+ *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result());
+
+ OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
+ *stream << ">\n";
+
+ for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+ if (unit_test.GetTestCase(i)->reportable_test_count() > 0)
+ PrintXmlTestCase(stream, *unit_test.GetTestCase(i));
+ }
+ *stream << "</" << kTestsuites << ">\n";
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+ const TestResult& result) {
+ Message attributes;
+ for (int i = 0; i < result.test_property_count(); ++i) {
+ const TestProperty& property = result.GetTestProperty(i);
+ attributes << " " << property.key() << "="
+ << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+ }
+ return attributes.GetString();
+}
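+
+// For example, a result carrying the single property ("os", "linux") yields
+// os="linux" with a leading space, which separates it from the preceding
+// attribute when streamed.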
+
+// End XmlUnitTestResultPrinter
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
+// replaces them by "%xx" where xx is their hexadecimal value. For
+// example, replaces "=" with "%3D". This algorithm is O(strlen(str))
+// in both time and space -- important as the input str may contain an
+// arbitrarily long test failure message and stack trace.
+string StreamingListener::UrlEncode(const char* str) {
+ string result;
+ result.reserve(strlen(str) + 1);
+ for (char ch = *str; ch != '\0'; ch = *++str) {
+ switch (ch) {
+ case '%':
+ case '=':
+ case '&':
+ case '\n':
+ result.append("%" + String::FormatByte(static_cast<unsigned char>(ch)));
+ break;
+ default:
+ result.push_back(ch);
+ break;
+ }
+ }
+ return result;
+}
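+
+// For example, UrlEncode("x=1&y=2\n") returns "x%3D1%26y%3D2%0A", assuming
+// String::FormatByte renders a byte as two uppercase hex digits.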
+
+void StreamingListener::SocketWriter::MakeConnection() {
+ GTEST_CHECK_(sockfd_ == -1)
+ << "MakeConnection() can't be called when there is already a connection.";
+
+ addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = AF_UNSPEC; // To allow both IPv4 and IPv6 addresses.
+ hints.ai_socktype = SOCK_STREAM;
+ addrinfo* servinfo = NULL;
+
+  // Use getaddrinfo() to get a linked list of IP addresses for
+  // the given host name.
+ const int error_num = getaddrinfo(
+ host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
+ if (error_num != 0) {
+ GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
+ << gai_strerror(error_num);
+ }
+
+ // Loop through all the results and connect to the first we can.
+ for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL;
+ cur_addr = cur_addr->ai_next) {
+ sockfd_ = socket(
+ cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol);
+ if (sockfd_ != -1) {
+ // Connect the client socket to the server socket.
+ if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) {
+ close(sockfd_);
+ sockfd_ = -1;
+ }
+ }
+ }
+
+ freeaddrinfo(servinfo); // all done with this structure
+
+ if (sockfd_ == -1) {
+ GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to "
+ << host_name_ << ":" << port_num_;
+ }
+}
+
+// End of class StreamingListener
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
+// Class ScopedTrace
+
+// Pushes the given source file location and message onto a per-thread
+// trace stack maintained by Google Test.
+ScopedTrace::ScopedTrace(const char* file, int line, const Message& message)
+ GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
+ TraceInfo trace;
+ trace.file = file;
+ trace.line = line;
+ trace.message = message.GetString();
+
+ UnitTest::GetInstance()->PushGTestTrace(trace);
+}
+
+// Pops the info pushed by the c'tor.
+ScopedTrace::~ScopedTrace()
+ GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
+ UnitTest::GetInstance()->PopGTestTrace();
+}
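+
+// ScopedTrace is normally created via the SCOPED_TRACE macro, e.g.:
+//
+//   void CheckPositive(int n) {
+//     SCOPED_TRACE(testing::Message() << "n = " << n);
+//     EXPECT_GT(n, 0);  // On failure, the trace message above is
+//   }                   // appended to the assertion's output.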
+
+
+// class OsStackTraceGetter
+
+const char* const OsStackTraceGetterInterface::kElidedFramesMarker =
+ "... " GTEST_NAME_ " internal frames ...";
+
+string OsStackTraceGetter::CurrentStackTrace(int /*max_depth*/,
+ int /*skip_count*/) {
+ return "";
+}
+
+void OsStackTraceGetter::UponLeavingGTest() {}
+
+// A helper class that creates the premature-exit file in its
+// constructor and deletes the file in its destructor.
+class ScopedPrematureExitFile {
+ public:
+ explicit ScopedPrematureExitFile(const char* premature_exit_filepath)
+ : premature_exit_filepath_(premature_exit_filepath) {
+ // If a path to the premature-exit file is specified...
+ if (premature_exit_filepath != NULL && *premature_exit_filepath != '\0') {
+ // create the file with a single "0" character in it. I/O
+ // errors are ignored as there's nothing better we can do and we
+ // don't want to fail the test because of this.
+      FILE* pfile = posix::FOpen(premature_exit_filepath, "w");
+      if (pfile != NULL) {  // FOpen may fail; that is ignored as stated above.
+        fwrite("0", 1, 1, pfile);
+        fclose(pfile);
+      }
+ }
+ }
+
+ ~ScopedPrematureExitFile() {
+ if (premature_exit_filepath_ != NULL && *premature_exit_filepath_ != '\0') {
+ remove(premature_exit_filepath_);
+ }
+ }
+
+ private:
+ const char* const premature_exit_filepath_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile);
+};
+
+} // namespace internal
+
+// class TestEventListeners
+
+TestEventListeners::TestEventListeners()
+ : repeater_(new internal::TestEventRepeater()),
+ default_result_printer_(NULL),
+ default_xml_generator_(NULL) {
+}
+
+TestEventListeners::~TestEventListeners() { delete repeater_; }
+
+// Appends an event listener to the end of the list. Google Test takes
+// ownership of the listener (i.e. it will delete the listener when the
+// test program finishes).
+void TestEventListeners::Append(TestEventListener* listener) {
+ repeater_->Append(listener);
+}
+
+// Removes the given event listener from the list and returns it. It then
+// becomes the caller's responsibility to delete the listener. Returns
+// NULL if the listener is not found in the list.
+TestEventListener* TestEventListeners::Release(TestEventListener* listener) {
+ if (listener == default_result_printer_)
+ default_result_printer_ = NULL;
+ else if (listener == default_xml_generator_)
+ default_xml_generator_ = NULL;
+ return repeater_->Release(listener);
+}
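+
+// For example, user code that wants to replace the default console output
+// typically does (sketch; MinimalistPrinter is a user-defined listener):
+//
+//   TestEventListeners& listeners = UnitTest::GetInstance()->listeners();
+//   delete listeners.Release(listeners.default_result_printer());
+//   listeners.Append(new MinimalistPrinter);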
+
+// Returns repeater that broadcasts the TestEventListener events to all
+// subscribers.
+TestEventListener* TestEventListeners::repeater() { return repeater_; }
+
+// Sets the default_result_printer attribute to the provided listener. The
+// listener is also added to the listener list and the previous
+// default_result_printer is removed from it and deleted. The listener can
+// also be NULL, in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) {
+ if (default_result_printer_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_result_printer_);
+ default_result_printer_ = listener;
+ if (listener != NULL)
+ Append(listener);
+ }
+}
+
+// Sets the default_xml_generator attribute to the provided listener. The
+// listener is also added to the listener list and the previous
+// default_xml_generator is removed from it and deleted. The listener can
+// also be NULL, in which case it will not be added to the list. Does
+// nothing if the previous and the current listener objects are the same.
+void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) {
+ if (default_xml_generator_ != listener) {
+ // It is an error to pass this method a listener that is already in the
+ // list.
+ delete Release(default_xml_generator_);
+ default_xml_generator_ = listener;
+ if (listener != NULL)
+ Append(listener);
+ }
+}
+
+// Controls whether events will be forwarded by the repeater to the
+// listeners in the list.
+bool TestEventListeners::EventForwardingEnabled() const {
+ return repeater_->forwarding_enabled();
+}
+
+void TestEventListeners::SuppressEventForwarding() {
+ repeater_->set_forwarding_enabled(false);
+}
+
+// class UnitTest
+
+// Gets the singleton UnitTest object. The first time this method is
+// called, a UnitTest object is constructed and returned. Consecutive
+// calls will return the same object.
+//
+// We don't protect this under mutex_ as a user is not supposed to
+// call this before main() starts, from which point on the return
+// value will never change.
+UnitTest* UnitTest::GetInstance() {
+ // When compiled with MSVC 7.1 in optimized mode, destroying the
+ // UnitTest object upon exiting the program messes up the exit code,
+ // causing successful tests to appear failed. We have to use a
+ // different implementation in this case to bypass the compiler bug.
+ // This implementation makes the compiler happy, at the cost of
+ // leaking the UnitTest object.
+
+  // CodeGear C++Builder insists on a public destructor for the
+  // default implementation. Use this implementation to keep good OO
+  // design with a private destructor.
+
+#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+ static UnitTest* const instance = new UnitTest;
+ return instance;
+#else
+ static UnitTest instance;
+ return &instance;
+#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__)
+}
+
+// Gets the number of successful test cases.
+int UnitTest::successful_test_case_count() const {
+ return impl()->successful_test_case_count();
+}
+
+// Gets the number of failed test cases.
+int UnitTest::failed_test_case_count() const {
+ return impl()->failed_test_case_count();
+}
+
+// Gets the number of all test cases.
+int UnitTest::total_test_case_count() const {
+ return impl()->total_test_case_count();
+}
+
+// Gets the number of all test cases that contain at least one test
+// that should run.
+int UnitTest::test_case_to_run_count() const {
+ return impl()->test_case_to_run_count();
+}
+
+// Gets the number of successful tests.
+int UnitTest::successful_test_count() const {
+ return impl()->successful_test_count();
+}
+
+// Gets the number of failed tests.
+int UnitTest::failed_test_count() const { return impl()->failed_test_count(); }
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int UnitTest::reportable_disabled_test_count() const {
+ return impl()->reportable_disabled_test_count();
+}
+
+// Gets the number of disabled tests.
+int UnitTest::disabled_test_count() const {
+ return impl()->disabled_test_count();
+}
+
+// Gets the number of tests to be printed in the XML report.
+int UnitTest::reportable_test_count() const {
+ return impl()->reportable_test_count();
+}
+
+// Gets the number of all tests.
+int UnitTest::total_test_count() const { return impl()->total_test_count(); }
+
+// Gets the number of tests that should run.
+int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); }
+
+// Gets the time of the test program start, in ms from the start of the
+// UNIX epoch.
+internal::TimeInMillis UnitTest::start_timestamp() const {
+ return impl()->start_timestamp();
+}
+
+// Gets the elapsed time, in milliseconds.
+internal::TimeInMillis UnitTest::elapsed_time() const {
+ return impl()->elapsed_time();
+}
+
+// Returns true iff the unit test passed (i.e. all test cases passed).
+bool UnitTest::Passed() const { return impl()->Passed(); }
+
+// Returns true iff the unit test failed (i.e. some test case failed
+// or something outside of all tests failed).
+bool UnitTest::Failed() const { return impl()->Failed(); }
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+const TestCase* UnitTest::GetTestCase(int i) const {
+ return impl()->GetTestCase(i);
+}
+
+// Returns the TestResult containing information on test failures and
+// properties logged outside of individual test cases.
+const TestResult& UnitTest::ad_hoc_test_result() const {
+ return *impl()->ad_hoc_test_result();
+}
+
+// Gets the i-th test case among all the test cases. i can range from 0 to
+// total_test_case_count() - 1. If i is not in that range, returns NULL.
+TestCase* UnitTest::GetMutableTestCase(int i) {
+ return impl()->GetMutableTestCase(i);
+}
+
+// Returns the list of event listeners that can be used to track events
+// inside Google Test.
+TestEventListeners& UnitTest::listeners() {
+ return *impl()->listeners();
+}
+
+// Registers and returns a global test environment. When a test
+// program is run, all global test environments will be set up in the
+// order they were registered. After all tests in the program have
+// finished, all global test environments will be torn down in the
+// *reverse* order they were registered.
+//
+// The UnitTest object takes ownership of the given environment.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+Environment* UnitTest::AddEnvironment(Environment* env) {
+ if (env == NULL) {
+ return NULL;
+ }
+
+ impl_->environments().push_back(env);
+ return env;
+}
+
+// Adds a TestPartResult to the current TestResult object. All Google Test
+// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
+// this to report their results. The user code should use the
+// assertion macros instead of calling this directly.
+void UnitTest::AddTestPartResult(
+ TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const std::string& message,
+ const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) {
+ Message msg;
+ msg << message;
+
+ internal::MutexLock lock(&mutex_);
+ if (impl_->gtest_trace_stack().size() > 0) {
+ msg << "\n" << GTEST_NAME_ << " trace:";
+
+ for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
+ i > 0; --i) {
+ const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
+ msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
+ << " " << trace.message;
+ }
+ }
+
+  if (!os_stack_trace.empty()) {
+ msg << internal::kStackTraceMarker << os_stack_trace;
+ }
+
+ const TestPartResult result =
+ TestPartResult(result_type, file_name, line_number,
+ msg.GetString().c_str());
+ impl_->GetTestPartResultReporterForCurrentThread()->
+ ReportTestPartResult(result);
+
+ if (result_type != TestPartResult::kSuccess) {
+ // gtest_break_on_failure takes precedence over
+ // gtest_throw_on_failure. This allows a user to set the latter
+ // in the code (perhaps in order to use Google Test assertions
+ // with another testing framework) and specify the former on the
+ // command line for debugging.
+ if (GTEST_FLAG(break_on_failure)) {
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+ // Using DebugBreak on Windows allows gtest to still break into a debugger
+ // when a failure happens and both the --gtest_break_on_failure and
+ // the --gtest_catch_exceptions flags are specified.
+ DebugBreak();
+#else
+      // Dereference NULL through a volatile pointer to prevent the compiler
+      // from optimizing the write away. We use this rather than abort() or
+      // __builtin_trap() for portability: Symbian doesn't implement abort()
+      // well, and some debuggers don't correctly trap abort().
+ *static_cast<volatile int*>(NULL) = 1;
+#endif // GTEST_OS_WINDOWS
+ } else if (GTEST_FLAG(throw_on_failure)) {
+#if GTEST_HAS_EXCEPTIONS
+ throw internal::GoogleTestFailureException(result);
+#else
+ // We cannot call abort() as it generates a pop-up in debug mode
+ // that cannot be suppressed in VC 7.1 or below.
+ exit(1);
+#endif
+ }
+ }
+}
+
+// Adds a TestProperty to the current TestResult object when invoked from
+// inside a test, to the current TestCase's ad_hoc_test_result_ when invoked
+// from SetUpTestCase or TearDownTestCase, or to the global property set
+// when invoked elsewhere. If the result already contains a property with
+// the same key, the value will be updated.
+void UnitTest::RecordProperty(const std::string& key,
+ const std::string& value) {
+ impl_->RecordProperty(TestProperty(key, value));
+}
+
+// Runs all tests in this UnitTest object and prints the result.
+// Returns 0 if successful, or 1 otherwise.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+int UnitTest::Run() {
+ const bool in_death_test_child_process =
+ internal::GTEST_FLAG(internal_run_death_test).length() > 0;
+
+ // Google Test implements this protocol for catching that a test
+ // program exits before returning control to Google Test:
+ //
+ // 1. Upon start, Google Test creates a file whose absolute path
+ // is specified by the environment variable
+ // TEST_PREMATURE_EXIT_FILE.
+ // 2. When Google Test has finished its work, it deletes the file.
+ //
+ // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before
+ // running a Google-Test-based test program and check the existence
+ // of the file at the end of the test execution to see if it has
+ // exited prematurely.
+
+ // If we are in the child process of a death test, don't
+ // create/delete the premature exit file, as doing so is unnecessary
+ // and will confuse the parent process. Otherwise, create/delete
+ // the file upon entering/leaving this function. If the program
+ // somehow exits before this function has a chance to return, the
+ // premature-exit file will be left undeleted, causing a test runner
+ // that understands the premature-exit-file protocol to report the
+ // test as having failed.
+ const internal::ScopedPrematureExitFile premature_exit_file(
+ in_death_test_child_process ?
+ NULL : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE"));
+
+ // Captures the value of GTEST_FLAG(catch_exceptions). This value will be
+ // used for the duration of the program.
+ impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions));
+
+#if GTEST_HAS_SEH
+  // Either the user wants Google Test to catch exceptions thrown by the
+  // tests or this is executing in the context of a death test child
+  // process. In either case the user does not want to see pop-up dialogs
+  // about crashes - they are expected.
+ if (impl()->catch_exceptions() || in_death_test_child_process) {
+# if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+ // SetErrorMode doesn't exist on CE.
+ SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT |
+ SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX);
+# endif // !GTEST_OS_WINDOWS_MOBILE
+
+# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE
+ // Death test children can be terminated with _abort(). On Windows,
+ // _abort() can show a dialog with a warning message. This forces the
+ // abort message to go to stderr instead.
+ _set_error_mode(_OUT_TO_STDERR);
+# endif
+
+# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+ // In the debug version, Visual Studio pops up a separate dialog
+ // offering a choice to debug the aborted program. We need to suppress
+ // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement
+ // executed. Google Test will notify the user of any unexpected
+ // failure via stderr.
+ //
+      // VC++ doesn't define _set_abort_behavior() prior to version 8.0.
+      // Users of prior VC versions shall suffer the agony and pain of
+      // clicking through the countless debug dialogs.
+      // TODO(vladl@google.com): find a way to suppress the abort dialog in
+      // debug mode when compiled with VC 7.1 or lower.
+ if (!GTEST_FLAG(break_on_failure))
+ _set_abort_behavior(
+ 0x0, // Clear the following flags:
+ _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump.
+# endif
+ }
+#endif // GTEST_HAS_SEH
+
+ return internal::HandleExceptionsInMethodIfSupported(
+ impl(),
+ &internal::UnitTestImpl::RunAllTests,
+ "auxiliary test code (environments or event listeners)") ? 0 : 1;
+}
+
+// Returns the working directory when the first TEST() or TEST_F() was
+// executed.
+const char* UnitTest::original_working_dir() const {
+ return impl_->original_working_dir_.c_str();
+}
+
+// Returns the TestCase object for the test that's currently running,
+// or NULL if no test is running.
+const TestCase* UnitTest::current_test_case() const
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_case();
+}
+
+// Returns the TestInfo object for the test that's currently running,
+// or NULL if no test is running.
+const TestInfo* UnitTest::current_test_info() const
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ return impl_->current_test_info();
+}
+
+// Returns the random seed used at the start of the current test run.
+int UnitTest::random_seed() const { return impl_->random_seed(); }
+
+#if GTEST_HAS_PARAM_TEST
+// Returns ParameterizedTestCaseRegistry object used to keep track of
+// value-parameterized tests and instantiate and register them.
+internal::ParameterizedTestCaseRegistry&
+ UnitTest::parameterized_test_registry()
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ return impl_->parameterized_test_registry();
+}
+#endif // GTEST_HAS_PARAM_TEST
+
+// Creates an empty UnitTest.
+UnitTest::UnitTest() {
+ impl_ = new internal::UnitTestImpl(this);
+}
+
+// Destructor of UnitTest.
+UnitTest::~UnitTest() {
+ delete impl_;
+}
+
+// Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+// Google Test trace stack.
+void UnitTest::PushGTestTrace(const internal::TraceInfo& trace)
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ impl_->gtest_trace_stack().push_back(trace);
+}
+
+// Pops a trace from the per-thread Google Test trace stack.
+void UnitTest::PopGTestTrace()
+ GTEST_LOCK_EXCLUDED_(mutex_) {
+ internal::MutexLock lock(&mutex_);
+ impl_->gtest_trace_stack().pop_back();
+}
+
+namespace internal {
+
+UnitTestImpl::UnitTestImpl(UnitTest* parent)
+ : parent_(parent),
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4355 /* using this in initializer */)
+ default_global_test_part_result_reporter_(this),
+ default_per_thread_test_part_result_reporter_(this),
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+ global_test_part_result_repoter_(
+ &default_global_test_part_result_reporter_),
+ per_thread_test_part_result_reporter_(
+ &default_per_thread_test_part_result_reporter_),
+#if GTEST_HAS_PARAM_TEST
+ parameterized_test_registry_(),
+ parameterized_tests_registered_(false),
+#endif // GTEST_HAS_PARAM_TEST
+ last_death_test_case_(-1),
+ current_test_case_(NULL),
+ current_test_info_(NULL),
+ ad_hoc_test_result_(),
+ os_stack_trace_getter_(NULL),
+ post_flag_parse_init_performed_(false),
+ random_seed_(0), // Will be overridden by the flag before first use.
+ random_(0), // Will be reseeded before first use.
+ start_timestamp_(0),
+ elapsed_time_(0),
+#if GTEST_HAS_DEATH_TEST
+ death_test_factory_(new DefaultDeathTestFactory),
+#endif
+ // Will be overridden by the flag before first use.
+ catch_exceptions_(false) {
+ listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
+}
+
+UnitTestImpl::~UnitTestImpl() {
+ // Deletes every TestCase.
+ ForEach(test_cases_, internal::Delete<TestCase>);
+
+ // Deletes every Environment.
+ ForEach(environments_, internal::Delete<Environment>);
+
+ delete os_stack_trace_getter_;
+}
+
+// Adds a TestProperty to the current TestResult object when invoked in the
+// context of a test, to the current test case's ad_hoc_test_result when
+// invoked from SetUpTestCase/TearDownTestCase, or to the global property set
+// otherwise. If the result already contains a property with the same key,
+// the value will be updated.
+void UnitTestImpl::RecordProperty(const TestProperty& test_property) {
+ std::string xml_element;
+ TestResult* test_result; // TestResult appropriate for property recording.
+
+ if (current_test_info_ != NULL) {
+ xml_element = "testcase";
+ test_result = &(current_test_info_->result_);
+ } else if (current_test_case_ != NULL) {
+ xml_element = "testsuite";
+ test_result = &(current_test_case_->ad_hoc_test_result_);
+ } else {
+ xml_element = "testsuites";
+ test_result = &ad_hoc_test_result_;
+ }
+ test_result->RecordProperty(xml_element, test_property);
+}
+
+#if GTEST_HAS_DEATH_TEST
+// Disables event forwarding if the control is currently in a death test
+// subprocess. Must not be called before InitGoogleTest.
+void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
+ if (internal_run_death_test_flag_.get() != NULL)
+ listeners()->SuppressEventForwarding();
+}
+#endif // GTEST_HAS_DEATH_TEST
+
+// Initializes event listeners performing XML output as specified by
+// UnitTestOptions. Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureXmlOutput() {
+ const std::string& output_format = UnitTestOptions::GetOutputFormat();
+ if (output_format == "xml") {
+ listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+ UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+ } else if (output_format != "") {
+ printf("WARNING: unrecognized output format \"%s\" ignored.\n",
+ output_format.c_str());
+ fflush(stdout);
+ }
+}
+
+#if GTEST_CAN_STREAM_RESULTS_
+// Initializes event listeners for streaming test results in string form.
+// Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureStreamingOutput() {
+ const std::string& target = GTEST_FLAG(stream_result_to);
+ if (!target.empty()) {
+ const size_t pos = target.find(':');
+ if (pos != std::string::npos) {
+ listeners()->Append(new StreamingListener(target.substr(0, pos),
+ target.substr(pos+1)));
+ } else {
+ printf("WARNING: unrecognized streaming target \"%s\" ignored.\n",
+ target.c_str());
+ fflush(stdout);
+ }
+ }
+}
+#endif // GTEST_CAN_STREAM_RESULTS_
+
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly. It is called from InitGoogleTest after the call
+// to ParseGoogleTestFlagsOnly. In case a user neglects to call
+// InitGoogleTest, this function is also called from RunAllTests. Since this
+// function can be called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+ // Ensures that this function does not execute more than once.
+ if (!post_flag_parse_init_performed_) {
+ post_flag_parse_init_performed_ = true;
+
+#if defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_)
+ // Register to send notifications about key process state changes.
+ listeners()->Append(new GTEST_CUSTOM_TEST_EVENT_LISTENER_());
+#endif // defined(GTEST_CUSTOM_TEST_EVENT_LISTENER_)
+
+#if GTEST_HAS_DEATH_TEST
+ InitDeathTestSubprocessControlInfo();
+ SuppressTestEventsIfInSubprocess();
+#endif // GTEST_HAS_DEATH_TEST
+
+ // Registers parameterized tests. This makes parameterized tests
+ // available to the UnitTest reflection API without running
+ // RUN_ALL_TESTS.
+ RegisterParameterizedTests();
+
+ // Configures listeners for XML output. This makes it possible for users
+ // to shut down the default XML output before invoking RUN_ALL_TESTS.
+ ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+ // Configures listeners for streaming test results to the specified server.
+ ConfigureStreamingOutput();
+#endif // GTEST_CAN_STREAM_RESULTS_
+ }
+}
+
+// A predicate that checks the name of a TestCase against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestCaseNameIs is copyable.
+class TestCaseNameIs {
+ public:
+ // Constructor.
+ explicit TestCaseNameIs(const std::string& name)
+ : name_(name) {}
+
+ // Returns true iff the name of test_case matches name_.
+ bool operator()(const TestCase* test_case) const {
+ return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
+ }
+
+ private:
+ std::string name_;
+};
+
+// Finds and returns a TestCase with the given name. If one doesn't
+// exist, creates one and returns it. It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+// test_case_name: name of the test case
+// type_param: the name of the test case's type parameter, or NULL if
+// this is not a typed or a type-parameterized test case.
+// set_up_tc: pointer to the function that sets up the test case
+// tear_down_tc: pointer to the function that tears down the test case
+TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
+ const char* type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc) {
+ // Can we find a TestCase with the given name?
+ const std::vector<TestCase*>::const_iterator test_case =
+ std::find_if(test_cases_.begin(), test_cases_.end(),
+ TestCaseNameIs(test_case_name));
+
+ if (test_case != test_cases_.end())
+ return *test_case;
+
+ // No. Let's create one.
+ TestCase* const new_test_case =
+ new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);
+
+ // Is this a death test case?
+ if (internal::UnitTestOptions::MatchesFilter(test_case_name,
+ kDeathTestCaseFilter)) {
+ // Yes. Inserts the test case after the last death test case
+ // defined so far. This only works when the test cases haven't
+ // been shuffled. Otherwise we may end up running a death test
+ // after a non-death test.
+ ++last_death_test_case_;
+ test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+ new_test_case);
+ } else {
+ // No. Appends to the end of the list.
+ test_cases_.push_back(new_test_case);
+ }
+
+ test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+ return new_test_case;
+}
+
+// Helpers for setting up / tearing down the given environment. They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns true if all tests are successful. If any exception is
+// thrown during a test, the test is considered to be failed, but the
+// rest of the tests will still be run.
+//
+// When parameterized tests are enabled, it expands and registers
+// parameterized tests first in RegisterParameterizedTests().
+// All other functions called from RunAllTests() may safely assume that
+// parameterized tests are ready to be counted and run.
+bool UnitTestImpl::RunAllTests() {
+ // Makes sure InitGoogleTest() was called.
+ if (!GTestIsInitialized()) {
+ printf("%s",
+ "\nThis test program did NOT call ::testing::InitGoogleTest "
+ "before calling RUN_ALL_TESTS(). Please fix it.\n");
+ return false;
+ }
+
+ // Do not run any test if the --help flag was specified.
+ if (g_help_flag)
+ return true;
+
+ // Repeats the call to the post-flag parsing initialization in case the
+ // user didn't call InitGoogleTest.
+ PostFlagParsingInit();
+
+ // Even if sharding is not on, test runners may want to use the
+ // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding
+ // protocol.
+ internal::WriteToShardStatusFileIfNeeded();
+
+ // True iff we are in a subprocess for running a thread-safe-style
+ // death test.
+ bool in_subprocess_for_death_test = false;
+
+#if GTEST_HAS_DEATH_TEST
+ in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL);
+# if defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_)
+ if (in_subprocess_for_death_test) {
+ GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_();
+ }
+# endif // defined(GTEST_EXTRA_DEATH_TEST_CHILD_SETUP_)
+#endif // GTEST_HAS_DEATH_TEST
+
+ const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex,
+ in_subprocess_for_death_test);
+
+ // Compares the full test names with the filter to decide which
+ // tests to run.
+ const bool has_tests_to_run = FilterTests(should_shard
+ ? HONOR_SHARDING_PROTOCOL
+ : IGNORE_SHARDING_PROTOCOL) > 0;
+
+ // Lists the tests and exits if the --gtest_list_tests flag was specified.
+ if (GTEST_FLAG(list_tests)) {
+ // This must be called *after* FilterTests() has been called.
+ ListTestsMatchingFilter();
+ return true;
+ }
+
+ random_seed_ = GTEST_FLAG(shuffle) ?
+ GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0;
+
+ // True iff at least one test has failed.
+ bool failed = false;
+
+ TestEventListener* repeater = listeners()->repeater();
+
+ start_timestamp_ = GetTimeInMillis();
+ repeater->OnTestProgramStart(*parent_);
+
+ // How many times to repeat the tests? We don't want to repeat them
+ // when we are inside the subprocess of a death test.
+ const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat);
+ // Repeats forever if the repeat count is negative.
+ const bool forever = repeat < 0;
+ for (int i = 0; forever || i != repeat; i++) {
+ // We want to preserve failures generated by ad-hoc test
+ // assertions executed before RUN_ALL_TESTS().
+ ClearNonAdHocTestResult();
+
+ const TimeInMillis start = GetTimeInMillis();
+
+ // Shuffles test cases and tests if requested.
+ if (has_tests_to_run && GTEST_FLAG(shuffle)) {
+ random()->Reseed(random_seed_);
+ // This should be done before calling OnTestIterationStart(),
+ // such that a test event listener can see the actual test order
+ // in the event.
+ ShuffleTests();
+ }
+
+ // Tells the unit test event listeners that the tests are about to start.
+ repeater->OnTestIterationStart(*parent_, i);
+
+ // Runs each test case if there is at least one test to run.
+ if (has_tests_to_run) {
+ // Sets up all environments beforehand.
+ repeater->OnEnvironmentsSetUpStart(*parent_);
+ ForEach(environments_, SetUpEnvironment);
+ repeater->OnEnvironmentsSetUpEnd(*parent_);
+
+ // Runs the tests only if there was no fatal failure during global
+ // set-up.
+ if (!Test::HasFatalFailure()) {
+ for (int test_index = 0; test_index < total_test_case_count();
+ test_index++) {
+ GetMutableTestCase(test_index)->Run();
+ }
+ }
+
+ // Tears down all environments in reverse order afterwards.
+ repeater->OnEnvironmentsTearDownStart(*parent_);
+ std::for_each(environments_.rbegin(), environments_.rend(),
+ TearDownEnvironment);
+ repeater->OnEnvironmentsTearDownEnd(*parent_);
+ }
+
+ elapsed_time_ = GetTimeInMillis() - start;
+
+ // Tells the unit test event listener that the tests have just finished.
+ repeater->OnTestIterationEnd(*parent_, i);
+
+ // Gets the result and clears it.
+ if (!Passed()) {
+ failed = true;
+ }
+
+ // Restores the original test order after the iteration. This
+ // allows the user to quickly repro a failure that happens in the
+ // N-th iteration without repeating the first (N - 1) iterations.
+ // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in
+ // case the user somehow changes the value of the flag somewhere
+ // (it's always safe to unshuffle the tests).
+ UnshuffleTests();
+
+ if (GTEST_FLAG(shuffle)) {
+ // Picks a new random seed for each iteration.
+ random_seed_ = GetNextRandomSeed(random_seed_);
+ }
+ }
+
+ repeater->OnTestProgramEnd(*parent_);
+
+ return !failed;
+}
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded() {
+ const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile);
+ if (test_shard_file != NULL) {
+ FILE* const file = posix::FOpen(test_shard_file, "w");
+ if (file == NULL) {
+ ColoredPrintf(COLOR_RED,
+ "Could not write to the test shard status file \"%s\" "
+ "specified by the %s environment variable.\n",
+ test_shard_file, kTestShardStatusFile);
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+ fclose(file);
+ }
+}
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (i.e., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+bool ShouldShard(const char* total_shards_env,
+ const char* shard_index_env,
+ bool in_subprocess_for_death_test) {
+ if (in_subprocess_for_death_test) {
+ return false;
+ }
+
+ const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1);
+ const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1);
+
+ if (total_shards == -1 && shard_index == -1) {
+ return false;
+ } else if (total_shards == -1 && shard_index != -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestShardIndex << " = " << shard_index
+ << ", but have left " << kTestTotalShards << " unset.\n";
+    ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (total_shards != -1 && shard_index == -1) {
+ const Message msg = Message()
+ << "Invalid environment variables: you have "
+ << kTestTotalShards << " = " << total_shards
+ << ", but have left " << kTestShardIndex << " unset.\n";
+    ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ } else if (shard_index < 0 || shard_index >= total_shards) {
+ const Message msg = Message()
+ << "Invalid environment variables: we require 0 <= "
+ << kTestShardIndex << " < " << kTestTotalShards
+ << ", but you have " << kTestShardIndex << "=" << shard_index
+ << ", " << kTestTotalShards << "=" << total_shards << ".\n";
+    ColoredPrintf(COLOR_RED, "%s", msg.GetString().c_str());
+ fflush(stdout);
+ exit(EXIT_FAILURE);
+ }
+
+ return total_shards > 1;
+}
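+
+// For example, to split one test binary across two machines, a runner can
+// set the variables named by kTestTotalShards and kTestShardIndex
+// (GTEST_TOTAL_SHARDS=2 with GTEST_SHARD_INDEX=0 on one machine and
+// GTEST_SHARD_INDEX=1 on the other); each invocation then runs a disjoint
+// subset of the tests.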
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) {
+ const char* str_val = posix::GetEnv(var);
+ if (str_val == NULL) {
+ return default_val;
+ }
+
+ Int32 result;
+ if (!ParseInt32(Message() << "The value of environment variable " << var,
+ str_val, &result)) {
+ exit(EXIT_FAILURE);
+ }
+ return result;
+}
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) {
+ return (test_id % total_shards) == shard_index;
+}
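+
+// For example, with total_shards == 3, shard 0 runs the tests with ids
+// 0, 3, 6, ..., shard 1 runs ids 1, 4, 7, ..., and shard 2 runs the rest.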
+
+// Compares the name of each test with the user-specified filter to
+// decide whether the test should be run, then records the result in
+// each TestCase and TestInfo object.
+// If shard_tests == true, further filters tests based on sharding
+// variables in the environment - see
+// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide.
+// Returns the number of tests that should run.
+int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) {
+ const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestTotalShards, -1) : -1;
+ const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ?
+ Int32FromEnvOrDie(kTestShardIndex, -1) : -1;
+
+  // num_runnable_tests is the number of tests that will
+  // run across all shards (i.e., match filter and are not disabled).
+  // num_selected_tests is the number of tests to be run on
+  // this shard.
+ int num_runnable_tests = 0;
+ int num_selected_tests = 0;
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ TestCase* const test_case = test_cases_[i];
+ const std::string &test_case_name = test_case->name();
+ test_case->set_should_run(false);
+
+ for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+ TestInfo* const test_info = test_case->test_info_list()[j];
+ const std::string test_name(test_info->name());
+ // A test is disabled if test case name or test name matches
+ // kDisableTestFilter.
+ const bool is_disabled =
+ internal::UnitTestOptions::MatchesFilter(test_case_name,
+ kDisableTestFilter) ||
+ internal::UnitTestOptions::MatchesFilter(test_name,
+ kDisableTestFilter);
+ test_info->is_disabled_ = is_disabled;
+
+ const bool matches_filter =
+ internal::UnitTestOptions::FilterMatchesTest(test_case_name,
+ test_name);
+ test_info->matches_filter_ = matches_filter;
+
+ const bool is_runnable =
+ (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) &&
+ matches_filter;
+
+ const bool is_selected = is_runnable &&
+ (shard_tests == IGNORE_SHARDING_PROTOCOL ||
+ ShouldRunTestOnShard(total_shards, shard_index,
+ num_runnable_tests));
+
+ num_runnable_tests += is_runnable;
+ num_selected_tests += is_selected;
+
+ test_info->should_run_ = is_selected;
+ test_case->set_should_run(test_case->should_run() || is_selected);
+ }
+ }
+ return num_selected_tests;
+}
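+
+// For example, under --gtest_filter=FooTest.*-FooTest.Bar, every test in
+// FooTest except FooTest.Bar gets matches_filter_ set; a disabled test
+// among them still runs only when also_run_disabled_tests is also on.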
+
+// Prints the given C-string on a single line by replacing all '\n'
+// characters with string "\\n". If the output takes more than
+// max_length characters, only prints the first max_length characters
+// and "...".
+static void PrintOnOneLine(const char* str, int max_length) {
+ if (str != NULL) {
+ for (int i = 0; *str != '\0'; ++str) {
+ if (i >= max_length) {
+ printf("...");
+ break;
+ }
+ if (*str == '\n') {
+ printf("\\n");
+ i += 2;
+ } else {
+ printf("%c", *str);
+ ++i;
+ }
+ }
+ }
+}
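+
+// For example, PrintOnOneLine("ab\ncd", 5) prints "ab\nc..."; the newline
+// is emitted as the two characters '\' and 'n', which count as two toward
+// the limit.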
+
+// Prints the names of the tests matching the user-specified filter flag.
+void UnitTestImpl::ListTestsMatchingFilter() {
+ // Print at most this many characters for each type/value parameter.
+ const int kMaxParamLength = 250;
+
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ const TestCase* const test_case = test_cases_[i];
+ bool printed_test_case_name = false;
+
+ for (size_t j = 0; j < test_case->test_info_list().size(); j++) {
+ const TestInfo* const test_info =
+ test_case->test_info_list()[j];
+ if (test_info->matches_filter_) {
+ if (!printed_test_case_name) {
+ printed_test_case_name = true;
+ printf("%s.", test_case->name());
+ if (test_case->type_param() != NULL) {
+ printf(" # %s = ", kTypeParamLabel);
+ // We print the type parameter on a single line to make
+ // the output easy to parse by a program.
+ PrintOnOneLine(test_case->type_param(), kMaxParamLength);
+ }
+ printf("\n");
+ }
+ printf(" %s", test_info->name());
+ if (test_info->value_param() != NULL) {
+ printf(" # %s = ", kValueParamLabel);
+ // We print the value parameter on a single line to make the
+ // output easy to parse by a program.
+ PrintOnOneLine(test_info->value_param(), kMaxParamLength);
+ }
+ printf("\n");
+ }
+ }
+ }
+ fflush(stdout);
+}
+
+// Sets the OS stack trace getter.
+//
+// Does nothing if the input and the current OS stack trace getter are
+// the same; otherwise, deletes the old getter and makes the input the
+// current getter.
+void UnitTestImpl::set_os_stack_trace_getter(
+ OsStackTraceGetterInterface* getter) {
+ if (os_stack_trace_getter_ != getter) {
+ delete os_stack_trace_getter_;
+ os_stack_trace_getter_ = getter;
+ }
+}
+
+// Returns the current OS stack trace getter if it is not NULL;
+// otherwise, creates an OsStackTraceGetter, makes it the current
+// getter, and returns it.
+OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
+ if (os_stack_trace_getter_ == NULL) {
+#ifdef GTEST_OS_STACK_TRACE_GETTER_
+ os_stack_trace_getter_ = new GTEST_OS_STACK_TRACE_GETTER_;
+#else
+ os_stack_trace_getter_ = new OsStackTraceGetter;
+#endif // GTEST_OS_STACK_TRACE_GETTER_
+ }
+
+ return os_stack_trace_getter_;
+}
+
+// Returns the TestResult for the test that's currently running, or
+// the TestResult for the ad hoc test if no test is running.
+TestResult* UnitTestImpl::current_test_result() {
+ return current_test_info_ ?
+ &(current_test_info_->result_) : &ad_hoc_test_result_;
+}
+
+// Shuffles all test cases, and the tests within each test case,
+// making sure that death tests are still run first.
+void UnitTestImpl::ShuffleTests() {
+ // Shuffles the death test cases.
+ ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);
+
+ // Shuffles the non-death test cases.
+ ShuffleRange(random(), last_death_test_case_ + 1,
+ static_cast<int>(test_cases_.size()), &test_case_indices_);
+
+ // Shuffles the tests inside each test case.
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ test_cases_[i]->ShuffleTests(random());
+ }
+}
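+
+// E.g. with five test cases and last_death_test_case_ == 1, the index
+// ranges [0, 2) and [2, 5) are shuffled independently, so death test
+// cases keep preceding all other test cases.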
+
+// Restores the test cases and tests to their order before the first shuffle.
+void UnitTestImpl::UnshuffleTests() {
+ for (size_t i = 0; i < test_cases_.size(); i++) {
+ // Unshuffles the tests in each test case.
+ test_cases_[i]->UnshuffleTests();
+ // Resets the index of each test case.
+ test_case_indices_[i] = static_cast<int>(i);
+ }
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
+ int skip_count) {
+ // We pass skip_count + 1 to skip this wrapper function in addition
+ // to what the user really wants to skip.
+ return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
+}
+
+// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to
+// suppress unreachable code warnings.
+namespace {
+class ClassUniqueToAlwaysTrue {};
+}
+
+bool IsTrue(bool condition) { return condition; }
+
+bool AlwaysTrue() {
+#if GTEST_HAS_EXCEPTIONS
+ // This condition is always false so AlwaysTrue() never actually throws,
+ // but it makes the compiler think that it may throw.
+ if (IsTrue(false))
+ throw ClassUniqueToAlwaysTrue();
+#endif // GTEST_HAS_EXCEPTIONS
+ return true;
+}
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr) {
+ const size_t prefix_len = strlen(prefix);
+ if (strncmp(*pstr, prefix, prefix_len) == 0) {
+ *pstr += prefix_len;
+ return true;
+ }
+ return false;
+}
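+
+// Example:
+//   const char* p = "--gtest_color";
+//   SkipPrefix("--", &p);  // returns true; p now points at "gtest_color"
+//   SkipPrefix("/", &p);   // returns false; p is left unchanged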
+
+// Parses a string as a command line flag. The string should have
+// the format "--flag=value". When def_optional is true, the "=value"
+// part can be omitted.
+//
+// Returns the value of the flag, or NULL if the parsing failed.
+const char* ParseFlagValue(const char* str,
+ const char* flag,
+ bool def_optional) {
+ // str and flag must not be NULL.
+ if (str == NULL || flag == NULL) return NULL;
+
+ // The flag must start with "--" followed by GTEST_FLAG_PREFIX_.
+ const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag;
+ const size_t flag_len = flag_str.length();
+ if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL;
+
+ // Skips the flag name.
+ const char* flag_end = str + flag_len;
+
+ // When def_optional is true, it's OK to not have a "=value" part.
+ if (def_optional && (flag_end[0] == '\0')) {
+ return flag_end;
+ }
+
+ // If def_optional is true and there are more characters after the
+ // flag name, or if def_optional is false, there must be a '=' after
+ // the flag name.
+ if (flag_end[0] != '=') return NULL;
+
+ // Returns the string after "=".
+ return flag_end + 1;
+}
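+
+// Assuming the default flag prefix "gtest_":
+//   ParseFlagValue("--gtest_filter=Foo.*", "filter", false)  returns "Foo.*"
+//   ParseFlagValue("--gtest_list_tests", "list_tests", true) returns ""
+//   ParseFlagValue("--gtest_repeat", "repeat", false)        returns NULL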
+
+// Parses a string for a bool flag, in the form of either
+// "--flag=value" or "--flag".
+//
+// In the former case, the value is taken as true as long as it does
+// not start with '0', 'f', or 'F'.
+//
+// In the latter case, the value is taken as true.
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseBoolFlag(const char* str, const char* flag, bool* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, true);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Converts the string value to a bool.
+ *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F');
+ return true;
+}
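+
+// Hence "--gtest_shuffle" and "--gtest_shuffle=1" both store true, while
+// "--gtest_shuffle=0", "--gtest_shuffle=false" and "--gtest_shuffle=F"
+// store false (again assuming the default "gtest_" prefix).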
+
+// Parses a string for an Int32 flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseInt32Flag(const char* str, const char* flag, Int32* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Sets *value to the value of the flag.
+ return ParseInt32(Message() << "The value of flag --" << flag,
+ value_str, value);
+}
+
+// Parses a string for a string flag, in the form of
+// "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true. On failure, returns false without changing *value.
+bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
+ // Gets the value of the flag as a string.
+ const char* const value_str = ParseFlagValue(str, flag, false);
+
+ // Aborts if the parsing failed.
+ if (value_str == NULL) return false;
+
+ // Sets *value to the value of the flag.
+ *value = value_str;
+ return true;
+}
+
+// Determines whether a string has a prefix that Google Test uses for its
+// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_.
+// If Google Test detects that a command line flag has its prefix but is not
+// recognized, it will print its help message. Flags starting with
+// GTEST_FLAG_PREFIX_ followed by "internal_" are considered Google Test
+// internal flags and do not trigger the help message.
+static bool HasGoogleTestFlagPrefix(const char* str) {
+ return (SkipPrefix("--", &str) ||
+ SkipPrefix("-", &str) ||
+ SkipPrefix("/", &str)) &&
+ !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) &&
+ (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||
+ SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));
+}
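+
+// With the default "gtest_" prefix this means, for example:
+//   "--gtest_foo"                     -> true  (unknown flag; shows help)
+//   "--gtest_internal_run_death_test" -> false (internal flag)
+//   "--unrelated"                     -> false (not a Google Test flag)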
+
+// Prints a string containing code-encoded text. The following escape
+// sequences can be used in the string to control the text color:
+//
+// @@ prints a single '@' character.
+// @R changes the color to red.
+// @G changes the color to green.
+// @Y changes the color to yellow.
+// @D changes to the default terminal text color.
+//
+// TODO(wan@google.com): Write tests for this once we add stdout
+// capturing to Google Test.
+static void PrintColorEncoded(const char* str) {
+ GTestColor color = COLOR_DEFAULT; // The current color.
+
+ // Conceptually, we split the string into segments divided by escape
+ // sequences. Then we print one segment at a time. At the end of
+ // each iteration, the str pointer advances to the beginning of the
+ // next segment.
+ for (;;) {
+ const char* p = strchr(str, '@');
+ if (p == NULL) {
+ ColoredPrintf(color, "%s", str);
+ return;
+ }
+
+ ColoredPrintf(color, "%s", std::string(str, p).c_str());
+
+ const char ch = p[1];
+ str = p + 2;
+ if (ch == '@') {
+ ColoredPrintf(color, "@");
+ } else if (ch == 'D') {
+ color = COLOR_DEFAULT;
+ } else if (ch == 'R') {
+ color = COLOR_RED;
+ } else if (ch == 'G') {
+ color = COLOR_GREEN;
+ } else if (ch == 'Y') {
+ color = COLOR_YELLOW;
+ } else {
+ --str;
+ }
+ }
+}
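+
+// For example, PrintColorEncoded("@Rfail@D: see @G@@output@D\n") prints
+// "fail" in red, ": see " in the default color, and "@output" in green,
+// followed by a newline in the default color.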
+
+static const char kColorEncodedHelpMessage[] =
+"This program contains tests written using " GTEST_NAME_ ". You can use the\n"
+"following command line flags to control its behavior:\n"
+"\n"
+"Test Selection:\n"
+" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n"
+" List the names of all tests instead of running them. The name of\n"
+" TEST(Foo, Bar) is \"Foo.Bar\".\n"
+" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS"
+ "[@G-@YNEGATIVE_PATTERNS]@D\n"
+" Run only the tests whose name matches one of the positive patterns but\n"
+" none of the negative patterns. '?' matches any single character; '*'\n"
+" matches any substring; ':' separates two patterns.\n"
+" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n"
+" Run all disabled tests too.\n"
+"\n"
+"Test Execution:\n"
+" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n"
+" Run the tests repeatedly; use a negative count to repeat forever.\n"
+" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n"
+" Randomize tests' orders on every iteration.\n"
+" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n"
+" Random number seed to use for shuffling test orders (between 1 and\n"
+" 99999, or 0 to use a seed based on the current time).\n"
+"\n"
+"Test Output:\n"
+" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n"
+" Enable/disable colored output. The default is @Gauto@D.\n"
+" -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n"
+" Don't print the elapsed time of each test.\n"
+" @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G"
+ GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n"
+" Generate an XML report in the given directory or with the given file\n"
+" name. @YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
+#if GTEST_CAN_STREAM_RESULTS_
+" @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n"
+" Stream test results to the given server.\n"
+#endif // GTEST_CAN_STREAM_RESULTS_
+"\n"
+"Assertion Behavior:\n"
+#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
+" Set the default death test style.\n"
+#endif // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+" @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n"
+" Turn assertion failures into debugger break-points.\n"
+" @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
+" Turn assertion failures into C++ exceptions.\n"
+" @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n"
+" Do not report exceptions as test failures. Instead, allow them\n"
+" to crash the program or throw a pop-up (on Windows).\n"
+"\n"
+"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
+ "the corresponding\n"
+"environment variable of a flag (all letters in upper-case). For example, to\n"
+"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_
+ "color=no@D or set\n"
+"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n"
+"\n"
+"For more information, please read the " GTEST_NAME_ " documentation at\n"
+"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+bool ParseGoogleTestFlag(const char* const arg) {
+ return ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+ &GTEST_FLAG(also_run_disabled_tests)) ||
+ ParseBoolFlag(arg, kBreakOnFailureFlag,
+ &GTEST_FLAG(break_on_failure)) ||
+ ParseBoolFlag(arg, kCatchExceptionsFlag,
+ &GTEST_FLAG(catch_exceptions)) ||
+ ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+ ParseStringFlag(arg, kDeathTestStyleFlag,
+ &GTEST_FLAG(death_test_style)) ||
+ ParseBoolFlag(arg, kDeathTestUseFork,
+ &GTEST_FLAG(death_test_use_fork)) ||
+ ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+ ParseStringFlag(arg, kInternalRunDeathTestFlag,
+ &GTEST_FLAG(internal_run_death_test)) ||
+ ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+ ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+ ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+ ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+ ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+ ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+ ParseInt32Flag(arg, kStackTraceDepthFlag,
+ &GTEST_FLAG(stack_trace_depth)) ||
+ ParseStringFlag(arg, kStreamResultToFlag,
+ &GTEST_FLAG(stream_result_to)) ||
+ ParseBoolFlag(arg, kThrowOnFailureFlag,
+ &GTEST_FLAG(throw_on_failure));
+}
+
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+void LoadFlagsFromFile(const std::string& path) {
+ FILE* flagfile = posix::FOpen(path.c_str(), "r");
+ if (!flagfile) {
+ fprintf(stderr,
+ "Unable to open file \"%s\"\n",
+ path.c_str());
+ fflush(stderr);
+ exit(EXIT_FAILURE);
+ }
+ std::string contents(ReadEntireFile(flagfile));
+ posix::FClose(flagfile);
+ std::vector<std::string> lines;
+ SplitString(contents, '\n', &lines);
+ for (size_t i = 0; i < lines.size(); ++i) {
+ if (lines[i].empty())
+ continue;
+ if (!ParseGoogleTestFlag(lines[i].c_str()))
+ g_help_flag = true;
+ }
+}
+#endif // GTEST_USE_OWN_FLAGFILE_FLAG_
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test. The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+ for (int i = 1; i < *argc; i++) {
+ const std::string arg_string = StreamableToString(argv[i]);
+ const char* const arg = arg_string.c_str();
+
+ using internal::ParseBoolFlag;
+ using internal::ParseInt32Flag;
+ using internal::ParseStringFlag;
+
+ bool remove_flag = false;
+ if (ParseGoogleTestFlag(arg)) {
+ remove_flag = true;
+#if GTEST_USE_OWN_FLAGFILE_FLAG_
+ } else if (ParseStringFlag(arg, kFlagfileFlag, &GTEST_FLAG(flagfile))) {
+ LoadFlagsFromFile(GTEST_FLAG(flagfile));
+ remove_flag = true;
+#endif // GTEST_USE_OWN_FLAGFILE_FLAG_
+ } else if (arg_string == "--help" || arg_string == "-h" ||
+ arg_string == "-?" || arg_string == "/?" ||
+ HasGoogleTestFlagPrefix(arg)) {
+ // Both help flag and unrecognized Google Test flags (excluding
+ // internal ones) trigger help display.
+ g_help_flag = true;
+ }
+
+ if (remove_flag) {
+ // Shift the remainder of the argv list left by one. Note
+ // that argv has (*argc + 1) elements, the last one always being
+ // NULL. The following loop moves the trailing NULL element as
+ // well.
+ for (int j = i; j != *argc; j++) {
+ argv[j] = argv[j + 1];
+ }
+
+ // Decrements the argument count.
+ (*argc)--;
+
+ // We also need to decrement the iterator as we just removed
+ // an element.
+ i--;
+ }
+ }
+
+ if (g_help_flag) {
+ // We print the help here instead of in RUN_ALL_TESTS(), as the
+ // latter may not be called at all if the user is using Google
+ // Test with another testing framework.
+ PrintColorEncoded(kColorEncodedHelpMessage);
+ }
+}
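+
+// For example, with argc == 3 and argv == {"prog", "--gtest_shuffle",
+// "--foo", NULL}, this leaves argc == 2 and argv == {"prog", "--foo",
+// NULL}: the recognized flag is consumed and unrecognized arguments stay.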
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
+ ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
+ ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+
+// The internal implementation of InitGoogleTest().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleTestImpl(int* argc, CharType** argv) {
+ // We don't want to run the initialization code twice.
+ if (GTestIsInitialized()) return;
+
+ if (*argc <= 0) return;
+
+ g_argvs.clear();
+ for (int i = 0; i != *argc; i++) {
+ g_argvs.push_back(StreamableToString(argv[i]));
+ }
+
+ ParseGoogleTestFlagsOnly(argc, argv);
+ GetUnitTestImpl()->PostFlagParsingInit();
+}
+
+} // namespace internal
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+void InitGoogleTest(int* argc, char** argv) {
+#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv);
+#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ internal::InitGoogleTestImpl(argc, argv);
+#endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleTest(int* argc, wchar_t** argv) {
+#if defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_(argc, argv);
+#else // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+ internal::InitGoogleTestImpl(argc, argv);
+#endif // defined(GTEST_CUSTOM_INIT_GOOGLE_TEST_FUNCTION_)
+}
+
+} // namespace testing
diff --git a/media/libaom/src/third_party/googletest/src/googletest/src/gtest_main.cc b/media/libaom/src/third_party/googletest/src/googletest/src/gtest_main.cc
new file mode 100644
index 000000000..f30282255
--- /dev/null
+++ b/media/libaom/src/third_party/googletest/src/googletest/src/gtest_main.cc
@@ -0,0 +1,38 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+
+#include "gtest/gtest.h"
+
+GTEST_API_ int main(int argc, char **argv) {
+ printf("Running main() from gtest_main.cc\n");
+ testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
diff --git a/media/libaom/src/third_party/libwebm/AUTHORS.TXT b/media/libaom/src/third_party/libwebm/AUTHORS.TXT
new file mode 100644
index 000000000..9686ac13e
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/AUTHORS.TXT
@@ -0,0 +1,4 @@
+# Names should be added to this file like so:
+# Name or Organization <email address>
+
+Google Inc.
diff --git a/media/libaom/src/third_party/libwebm/Android.mk b/media/libaom/src/third_party/libwebm/Android.mk
new file mode 100644
index 000000000..b46ba101d
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/Android.mk
@@ -0,0 +1,17 @@
+LOCAL_PATH:= $(call my-dir)
+
+include $(CLEAR_VARS)
+LOCAL_MODULE:= libwebm
+LOCAL_CPPFLAGS:=-D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS
+LOCAL_CPPFLAGS+=-D__STDC_LIMIT_MACROS -std=c++11
+LOCAL_C_INCLUDES:= $(LOCAL_PATH)
+LOCAL_EXPORT_C_INCLUDES:= $(LOCAL_PATH)
+
+LOCAL_SRC_FILES:= common/file_util.cc \
+ common/hdr_util.cc \
+ mkvparser/mkvparser.cc \
+ mkvparser/mkvreader.cc \
+ mkvmuxer/mkvmuxer.cc \
+ mkvmuxer/mkvmuxerutil.cc \
+ mkvmuxer/mkvwriter.cc
+include $(BUILD_STATIC_LIBRARY)
diff --git a/media/libaom/src/third_party/libwebm/LICENSE.TXT b/media/libaom/src/third_party/libwebm/LICENSE.TXT
new file mode 100644
index 000000000..7a6f99547
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/LICENSE.TXT
@@ -0,0 +1,30 @@
+Copyright (c) 2010, Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of Google nor the names of its contributors may
+ be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/media/libaom/src/third_party/libwebm/PATENTS.TXT b/media/libaom/src/third_party/libwebm/PATENTS.TXT
new file mode 100644
index 000000000..caedf607e
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/PATENTS.TXT
@@ -0,0 +1,23 @@
+Additional IP Rights Grant (Patents)
+------------------------------------
+
+"These implementations" means the copyrightable works that implement the WebM
+codecs distributed by Google as part of the WebM Project.
+
+Google hereby grants to you a perpetual, worldwide, non-exclusive, no-charge,
+royalty-free, irrevocable (except as stated in this section) patent license to
+make, have made, use, offer to sell, sell, import, transfer, and otherwise
+run, modify and propagate the contents of these implementations of WebM, where
+such license applies only to those patent claims, both currently owned by
+Google and acquired in the future, licensable by Google that are necessarily
+infringed by these implementations of WebM. This grant does not include claims
+that would be infringed only as a consequence of further modification of these
+implementations. If you or your agent or exclusive licensee institute or order
+or agree to the institution of patent litigation or any other patent
+enforcement activity against any entity (including a cross-claim or
+counterclaim in a lawsuit) alleging that any of these implementations of WebM
+or any code incorporated within any of these implementations of WebM
+constitute direct or contributory patent infringement, or inducement of
+patent infringement, then any patent rights granted to you under this License
+for these implementations of WebM shall terminate as of the date such
+litigation is filed.
diff --git a/media/libaom/src/third_party/libwebm/README.libaom b/media/libaom/src/third_party/libwebm/README.libaom
new file mode 100644
index 000000000..bd288d201
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/README.libaom
@@ -0,0 +1,22 @@
+URL: https://chromium.googlesource.com/webm/libwebm
+Version: af81f26025b7435fa9a14ad07c58b44cf9280430
+License: BSD
+License File: LICENSE.TXT
+
+Description:
+libwebm is used to handle WebM container I/O.
+
+Local Changes:
+Add av1 codec as an eligible codec for webm:
+ https://aomedia-review.googlesource.com/c/aom/+/15103
+Only keep:
+ - Android.mk
+ - AUTHORS.TXT
+ - common/
+ file_util.cc/h
+ hdr_util.cc/h
+ webmids.h
+ - LICENSE.TXT
+ - mkvmuxer/
+ - mkvparser/
+ - PATENTS.TXT
diff --git a/media/libaom/src/third_party/libwebm/common/file_util.cc b/media/libaom/src/third_party/libwebm/common/file_util.cc
new file mode 100644
index 000000000..618ffc087
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/common/file_util.cc
@@ -0,0 +1,93 @@
+// Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#include "common/file_util.h"
+
+#include <sys/stat.h>
+#ifndef _MSC_VER
+#include <unistd.h> // close()
+#endif
+
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <fstream>
+#include <ios>
+#include <string>
+
+namespace libwebm {
+
+std::string GetTempFileName() {
+#if !defined _MSC_VER && !defined __MINGW32__
+ std::string temp_file_name_template_str =
+ std::string(std::getenv("TEST_TMPDIR") ? std::getenv("TEST_TMPDIR") :
+ ".") +
+ "/libwebm_temp.XXXXXX";
+ char* temp_file_name_template =
+ new char[temp_file_name_template_str.length() + 1];
+ memset(temp_file_name_template, 0, temp_file_name_template_str.length() + 1);
+ temp_file_name_template_str.copy(temp_file_name_template,
+ temp_file_name_template_str.length(), 0);
+ int fd = mkstemp(temp_file_name_template);
+ std::string temp_file_name =
+ (fd != -1) ? std::string(temp_file_name_template) : std::string();
+ delete[] temp_file_name_template;
+ if (fd != -1) {
+ close(fd);
+ }
+ return temp_file_name;
+#else
+ char tmp_file_name[_MAX_PATH];
+#if defined _MSC_VER || defined MINGW_HAS_SECURE_API
+ errno_t err = tmpnam_s(tmp_file_name);
+#else
+ char* fname_pointer = tmpnam(tmp_file_name);
+ errno_t err = (fname_pointer == &tmp_file_name[0]) ? 0 : -1;
+#endif
+ if (err == 0) {
+ return std::string(tmp_file_name);
+ }
+ return std::string();
+#endif
+}
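+
+// On POSIX this returns something like "./libwebm_temp.a1b2c3" (or the
+// same pattern under $TEST_TMPDIR when set); on Windows the name comes
+// from tmpnam_s()/tmpnam() instead, and an empty string signals failure.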
+
+uint64_t GetFileSize(const std::string& file_name) {
+ uint64_t file_size = 0;
+#ifndef _MSC_VER
+ struct stat st;
+ st.st_size = 0;
+ if (stat(file_name.c_str(), &st) == 0) {
+#else
+ struct _stat st;
+ st.st_size = 0;
+ if (_stat(file_name.c_str(), &st) == 0) {
+#endif
+ file_size = st.st_size;
+ }
+ return file_size;
+}
+
+bool GetFileContents(const std::string& file_name, std::string* contents) {
+ std::ifstream file(file_name.c_str());
+ *contents = std::string(static_cast<size_t>(GetFileSize(file_name)), 0);
+ if (file.good() && contents->size()) {
+ file.read(&(*contents)[0], contents->size());
+ }
+ return !file.fail();
+}
+
+TempFileDeleter::TempFileDeleter() { file_name_ = GetTempFileName(); }
+
+TempFileDeleter::~TempFileDeleter() {
+ std::ifstream file(file_name_.c_str());
+ if (file.good()) {
+ file.close();
+ std::remove(file_name_.c_str());
+ }
+}
+
+} // namespace libwebm
diff --git a/media/libaom/src/third_party/libwebm/common/file_util.h b/media/libaom/src/third_party/libwebm/common/file_util.h
new file mode 100644
index 000000000..a87373464
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/common/file_util.h
@@ -0,0 +1,44 @@
+// Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#ifndef LIBWEBM_COMMON_FILE_UTIL_H_
+#define LIBWEBM_COMMON_FILE_UTIL_H_
+
+#include <stdint.h>
+
+#include <string>
+
+#include "mkvmuxer/mkvmuxertypes.h" // LIBWEBM_DISALLOW_COPY_AND_ASSIGN()
+
+namespace libwebm {
+
+// Returns a temporary file name.
+std::string GetTempFileName();
+
+// Returns size of file specified by |file_name|, or 0 upon failure.
+uint64_t GetFileSize(const std::string& file_name);
+
+// Gets the contents of |file_name| as a string. Returns false on error.
+bool GetFileContents(const std::string& file_name, std::string* contents);
+
+// Manages the life of the temporary file specified at time of construction.
+// Deletes the file upon destruction.
+class TempFileDeleter {
+ public:
+ TempFileDeleter();
+ explicit TempFileDeleter(std::string file_name) : file_name_(file_name) {}
+ ~TempFileDeleter();
+ const std::string& name() const { return file_name_; }
+
+ private:
+ std::string file_name_;
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(TempFileDeleter);
+};
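+
+// Typical usage sketch:
+//   libwebm::TempFileDeleter tmp;   // picks a fresh temporary file name
+//   WriteDataTo(tmp.name());        // hypothetical consumer of the path
+//   // the file, if created, is removed when |tmp| goes out of scope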
+
+} // namespace libwebm
+
+#endif // LIBWEBM_COMMON_FILE_UTIL_H_
diff --git a/media/libaom/src/third_party/libwebm/common/hdr_util.cc b/media/libaom/src/third_party/libwebm/common/hdr_util.cc
new file mode 100644
index 000000000..916f7170b
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/common/hdr_util.cc
@@ -0,0 +1,220 @@
+// Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#include "hdr_util.h"
+
+#include <climits>
+#include <cstddef>
+#include <new>
+
+#include "mkvparser/mkvparser.h"
+
+namespace libwebm {
+const int Vp9CodecFeatures::kValueNotPresent = INT_MAX;
+
+bool CopyPrimaryChromaticity(const mkvparser::PrimaryChromaticity& parser_pc,
+ PrimaryChromaticityPtr* muxer_pc) {
+ muxer_pc->reset(new (std::nothrow)
+ mkvmuxer::PrimaryChromaticity(parser_pc.x, parser_pc.y));
+ if (!muxer_pc->get())
+ return false;
+ return true;
+}
+
+bool MasteringMetadataValuePresent(double value) {
+ return value != mkvparser::MasteringMetadata::kValueNotPresent;
+}
+
+bool CopyMasteringMetadata(const mkvparser::MasteringMetadata& parser_mm,
+ mkvmuxer::MasteringMetadata* muxer_mm) {
+ if (MasteringMetadataValuePresent(parser_mm.luminance_max))
+ muxer_mm->set_luminance_max(parser_mm.luminance_max);
+ if (MasteringMetadataValuePresent(parser_mm.luminance_min))
+ muxer_mm->set_luminance_min(parser_mm.luminance_min);
+
+ PrimaryChromaticityPtr r_ptr(nullptr);
+ PrimaryChromaticityPtr g_ptr(nullptr);
+ PrimaryChromaticityPtr b_ptr(nullptr);
+ PrimaryChromaticityPtr wp_ptr(nullptr);
+
+ if (parser_mm.r) {
+ if (!CopyPrimaryChromaticity(*parser_mm.r, &r_ptr))
+ return false;
+ }
+ if (parser_mm.g) {
+ if (!CopyPrimaryChromaticity(*parser_mm.g, &g_ptr))
+ return false;
+ }
+ if (parser_mm.b) {
+ if (!CopyPrimaryChromaticity(*parser_mm.b, &b_ptr))
+ return false;
+ }
+ if (parser_mm.white_point) {
+ if (!CopyPrimaryChromaticity(*parser_mm.white_point, &wp_ptr))
+ return false;
+ }
+
+ if (!muxer_mm->SetChromaticity(r_ptr.get(), g_ptr.get(), b_ptr.get(),
+ wp_ptr.get())) {
+ return false;
+ }
+
+ return true;
+}
+
+bool ColourValuePresent(long long value) {
+ return value != mkvparser::Colour::kValueNotPresent;
+}
+
+bool CopyColour(const mkvparser::Colour& parser_colour,
+ mkvmuxer::Colour* muxer_colour) {
+ if (!muxer_colour)
+ return false;
+
+ if (ColourValuePresent(parser_colour.matrix_coefficients))
+ muxer_colour->set_matrix_coefficients(parser_colour.matrix_coefficients);
+ if (ColourValuePresent(parser_colour.bits_per_channel))
+ muxer_colour->set_bits_per_channel(parser_colour.bits_per_channel);
+ if (ColourValuePresent(parser_colour.chroma_subsampling_horz)) {
+ muxer_colour->set_chroma_subsampling_horz(
+ parser_colour.chroma_subsampling_horz);
+ }
+ if (ColourValuePresent(parser_colour.chroma_subsampling_vert)) {
+ muxer_colour->set_chroma_subsampling_vert(
+ parser_colour.chroma_subsampling_vert);
+ }
+ if (ColourValuePresent(parser_colour.cb_subsampling_horz))
+ muxer_colour->set_cb_subsampling_horz(parser_colour.cb_subsampling_horz);
+ if (ColourValuePresent(parser_colour.cb_subsampling_vert))
+ muxer_colour->set_cb_subsampling_vert(parser_colour.cb_subsampling_vert);
+ if (ColourValuePresent(parser_colour.chroma_siting_horz))
+ muxer_colour->set_chroma_siting_horz(parser_colour.chroma_siting_horz);
+ if (ColourValuePresent(parser_colour.chroma_siting_vert))
+ muxer_colour->set_chroma_siting_vert(parser_colour.chroma_siting_vert);
+ if (ColourValuePresent(parser_colour.range))
+ muxer_colour->set_range(parser_colour.range);
+ if (ColourValuePresent(parser_colour.transfer_characteristics)) {
+ muxer_colour->set_transfer_characteristics(
+ parser_colour.transfer_characteristics);
+ }
+ if (ColourValuePresent(parser_colour.primaries))
+ muxer_colour->set_primaries(parser_colour.primaries);
+ if (ColourValuePresent(parser_colour.max_cll))
+ muxer_colour->set_max_cll(parser_colour.max_cll);
+ if (ColourValuePresent(parser_colour.max_fall))
+ muxer_colour->set_max_fall(parser_colour.max_fall);
+
+ if (parser_colour.mastering_metadata) {
+ mkvmuxer::MasteringMetadata muxer_mm;
+ if (!CopyMasteringMetadata(*parser_colour.mastering_metadata, &muxer_mm))
+ return false;
+ if (!muxer_colour->SetMasteringMetadata(muxer_mm))
+ return false;
+ }
+ return true;
+}
+
+// Format of VPx private data:
+//
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | ID Byte | Length | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ |
+// | |
+// : Bytes 1..Length of Codec Feature :
+// | |
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+//
+// ID Byte Format
+// ID byte is an unsigned byte.
+// 0 1 2 3 4 5 6 7
+// +-+-+-+-+-+-+-+-+
+// |X| ID |
+// +-+-+-+-+-+-+-+-+
+//
+// The X bit is reserved.
+//
+// See the following link for more information:
+// http://www.webmproject.org/vp9/profiles/
+bool ParseVpxCodecPrivate(const uint8_t* private_data, int32_t length,
+ Vp9CodecFeatures* features) {
+ const int kVpxCodecPrivateMinLength = 3;
+ if (!private_data || !features || length < kVpxCodecPrivateMinLength)
+ return false;
+
+ const uint8_t kVp9ProfileId = 1;
+ const uint8_t kVp9LevelId = 2;
+ const uint8_t kVp9BitDepthId = 3;
+ const uint8_t kVp9ChromaSubsamplingId = 4;
+ const int kVpxFeatureLength = 1;
+ int offset = 0;
+
+ // Set features to not set.
+ features->profile = Vp9CodecFeatures::kValueNotPresent;
+ features->level = Vp9CodecFeatures::kValueNotPresent;
+ features->bit_depth = Vp9CodecFeatures::kValueNotPresent;
+ features->chroma_subsampling = Vp9CodecFeatures::kValueNotPresent;
+ do {
+ const uint8_t id_byte = private_data[offset++];
+ const uint8_t length_byte = private_data[offset++];
+ if (length_byte != kVpxFeatureLength)
+ return false;
+ if (id_byte == kVp9ProfileId) {
+ const int priv_profile = static_cast<int>(private_data[offset++]);
+ if (priv_profile < 0 || priv_profile > 3)
+ return false;
+ if (features->profile != Vp9CodecFeatures::kValueNotPresent &&
+ features->profile != priv_profile) {
+ return false;
+ }
+ features->profile = priv_profile;
+ } else if (id_byte == kVp9LevelId) {
+ const int priv_level = static_cast<int>(private_data[offset++]);
+
+ const int kNumLevels = 14;
+ const int levels[kNumLevels] = {10, 11, 20, 21, 30, 31, 40,
+ 41, 50, 51, 52, 60, 61, 62};
+
+ for (int i = 0; i < kNumLevels; ++i) {
+ if (priv_level == levels[i]) {
+ if (features->level != Vp9CodecFeatures::kValueNotPresent &&
+ features->level != priv_level) {
+ return false;
+ }
+ features->level = priv_level;
+ break;
+ }
+ }
+ if (features->level == Vp9CodecFeatures::kValueNotPresent)
+ return false;
+ } else if (id_byte == kVp9BitDepthId) {
+ const int priv_profile = static_cast<int>(private_data[offset++]);
+ if (priv_profile != 8 && priv_profile != 10 && priv_profile != 12)
+ return false;
+ if (features->bit_depth != Vp9CodecFeatures::kValueNotPresent &&
+ features->bit_depth != priv_profile) {
+ return false;
+ }
+ features->bit_depth = priv_profile;
+ } else if (id_byte == kVp9ChromaSubsamplingId) {
+ const int priv_profile = static_cast<int>(private_data[offset++]);
+ if (priv_profile != 0 && priv_profile != 2 && priv_profile != 3)
+ return false;
+ if (features->chroma_subsampling != Vp9CodecFeatures::kValueNotPresent &&
+ features->chroma_subsampling != priv_profile) {
+ return false;
+ }
+ features->chroma_subsampling = priv_profile;
+ } else {
+ // Invalid ID.
+ return false;
+ }
+ } while (offset + kVpxCodecPrivateMinLength <= length);
+
+ return true;
+}
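+
+// Example: the 6-byte CodecPrivate {0x01, 0x01, 0x00, 0x03, 0x01, 0x0a}
+// parses successfully as profile 0 with a bit depth of 10, leaving level
+// and chroma_subsampling at kValueNotPresent.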
+} // namespace libwebm
diff --git a/media/libaom/src/third_party/libwebm/common/hdr_util.h b/media/libaom/src/third_party/libwebm/common/hdr_util.h
new file mode 100644
index 000000000..78e2eeb70
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/common/hdr_util.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2016 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#ifndef LIBWEBM_COMMON_HDR_UTIL_H_
+#define LIBWEBM_COMMON_HDR_UTIL_H_
+
+#include <stdint.h>
+
+#include <memory>
+
+#include "mkvmuxer/mkvmuxer.h"
+
+namespace mkvparser {
+struct Colour;
+struct MasteringMetadata;
+struct PrimaryChromaticity;
+} // namespace mkvparser
+
+namespace libwebm {
+// Utility types and functions for working with the Colour element and its
+// children. Copiers return true upon success. Presence functions return true
+// when the specified element is present.
+
+// TODO(tomfinegan): These should be moved to libwebm_utils once c++11 is
+// required by libwebm.
+
+// Features of the VP9 codec that may be set in the CodecPrivate of a VP9 video
+// stream. A value of kValueNotPresent represents that the value was not set in
+// the CodecPrivate.
+struct Vp9CodecFeatures {
+ static const int kValueNotPresent;
+
+ Vp9CodecFeatures()
+ : profile(kValueNotPresent),
+ level(kValueNotPresent),
+ bit_depth(kValueNotPresent),
+ chroma_subsampling(kValueNotPresent) {}
+ ~Vp9CodecFeatures() {}
+
+ int profile;
+ int level;
+ int bit_depth;
+ int chroma_subsampling;
+};
+
+typedef std::unique_ptr<mkvmuxer::PrimaryChromaticity> PrimaryChromaticityPtr;
+
+bool CopyPrimaryChromaticity(const mkvparser::PrimaryChromaticity& parser_pc,
+ PrimaryChromaticityPtr* muxer_pc);
+
+bool MasteringMetadataValuePresent(double value);
+
+bool CopyMasteringMetadata(const mkvparser::MasteringMetadata& parser_mm,
+ mkvmuxer::MasteringMetadata* muxer_mm);
+
+bool ColourValuePresent(long long value);
+
+bool CopyColour(const mkvparser::Colour& parser_colour,
+ mkvmuxer::Colour* muxer_colour);
+
+// Returns true if |features| is set to one or more valid values.
+bool ParseVpxCodecPrivate(const uint8_t* private_data, int32_t length,
+ Vp9CodecFeatures* features);
+
+} // namespace libwebm
+
+#endif // LIBWEBM_COMMON_HDR_UTIL_H_
diff --git a/media/libaom/src/third_party/libwebm/common/webmids.h b/media/libaom/src/third_party/libwebm/common/webmids.h
new file mode 100644
index 000000000..89d722a71
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/common/webmids.h
@@ -0,0 +1,192 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#ifndef COMMON_WEBMIDS_H_
+#define COMMON_WEBMIDS_H_
+
+namespace libwebm {
+
+enum MkvId {
+ kMkvEBML = 0x1A45DFA3,
+ kMkvEBMLVersion = 0x4286,
+ kMkvEBMLReadVersion = 0x42F7,
+ kMkvEBMLMaxIDLength = 0x42F2,
+ kMkvEBMLMaxSizeLength = 0x42F3,
+ kMkvDocType = 0x4282,
+ kMkvDocTypeVersion = 0x4287,
+ kMkvDocTypeReadVersion = 0x4285,
+ kMkvVoid = 0xEC,
+ kMkvSignatureSlot = 0x1B538667,
+ kMkvSignatureAlgo = 0x7E8A,
+ kMkvSignatureHash = 0x7E9A,
+ kMkvSignaturePublicKey = 0x7EA5,
+ kMkvSignature = 0x7EB5,
+ kMkvSignatureElements = 0x7E5B,
+ kMkvSignatureElementList = 0x7E7B,
+ kMkvSignedElement = 0x6532,
+ // segment
+ kMkvSegment = 0x18538067,
+ // Meta Seek Information
+ kMkvSeekHead = 0x114D9B74,
+ kMkvSeek = 0x4DBB,
+ kMkvSeekID = 0x53AB,
+ kMkvSeekPosition = 0x53AC,
+ // Segment Information
+ kMkvInfo = 0x1549A966,
+ kMkvTimecodeScale = 0x2AD7B1,
+ kMkvDuration = 0x4489,
+ kMkvDateUTC = 0x4461,
+ kMkvTitle = 0x7BA9,
+ kMkvMuxingApp = 0x4D80,
+ kMkvWritingApp = 0x5741,
+ // Cluster
+ kMkvCluster = 0x1F43B675,
+ kMkvTimecode = 0xE7,
+ kMkvPrevSize = 0xAB,
+ kMkvBlockGroup = 0xA0,
+ kMkvBlock = 0xA1,
+ kMkvBlockDuration = 0x9B,
+ kMkvReferenceBlock = 0xFB,
+ kMkvLaceNumber = 0xCC,
+ kMkvSimpleBlock = 0xA3,
+ kMkvBlockAdditions = 0x75A1,
+ kMkvBlockMore = 0xA6,
+ kMkvBlockAddID = 0xEE,
+ kMkvBlockAdditional = 0xA5,
+ kMkvDiscardPadding = 0x75A2,
+ // Track
+ kMkvTracks = 0x1654AE6B,
+ kMkvTrackEntry = 0xAE,
+ kMkvTrackNumber = 0xD7,
+ kMkvTrackUID = 0x73C5,
+ kMkvTrackType = 0x83,
+ kMkvFlagEnabled = 0xB9,
+ kMkvFlagDefault = 0x88,
+ kMkvFlagForced = 0x55AA,
+ kMkvFlagLacing = 0x9C,
+ kMkvDefaultDuration = 0x23E383,
+ kMkvMaxBlockAdditionID = 0x55EE,
+ kMkvName = 0x536E,
+ kMkvLanguage = 0x22B59C,
+ kMkvCodecID = 0x86,
+ kMkvCodecPrivate = 0x63A2,
+ kMkvCodecName = 0x258688,
+ kMkvCodecDelay = 0x56AA,
+ kMkvSeekPreRoll = 0x56BB,
+ // video
+ kMkvVideo = 0xE0,
+ kMkvFlagInterlaced = 0x9A,
+ kMkvStereoMode = 0x53B8,
+ kMkvAlphaMode = 0x53C0,
+ kMkvPixelWidth = 0xB0,
+ kMkvPixelHeight = 0xBA,
+ kMkvPixelCropBottom = 0x54AA,
+ kMkvPixelCropTop = 0x54BB,
+ kMkvPixelCropLeft = 0x54CC,
+ kMkvPixelCropRight = 0x54DD,
+ kMkvDisplayWidth = 0x54B0,
+ kMkvDisplayHeight = 0x54BA,
+ kMkvDisplayUnit = 0x54B2,
+ kMkvAspectRatioType = 0x54B3,
+ kMkvFrameRate = 0x2383E3,
+ // end video
+ // colour
+ kMkvColour = 0x55B0,
+ kMkvMatrixCoefficients = 0x55B1,
+ kMkvBitsPerChannel = 0x55B2,
+ kMkvChromaSubsamplingHorz = 0x55B3,
+ kMkvChromaSubsamplingVert = 0x55B4,
+ kMkvCbSubsamplingHorz = 0x55B5,
+ kMkvCbSubsamplingVert = 0x55B6,
+ kMkvChromaSitingHorz = 0x55B7,
+ kMkvChromaSitingVert = 0x55B8,
+ kMkvRange = 0x55B9,
+ kMkvTransferCharacteristics = 0x55BA,
+ kMkvPrimaries = 0x55BB,
+ kMkvMaxCLL = 0x55BC,
+ kMkvMaxFALL = 0x55BD,
+ // mastering metadata
+ kMkvMasteringMetadata = 0x55D0,
+ kMkvPrimaryRChromaticityX = 0x55D1,
+ kMkvPrimaryRChromaticityY = 0x55D2,
+ kMkvPrimaryGChromaticityX = 0x55D3,
+ kMkvPrimaryGChromaticityY = 0x55D4,
+ kMkvPrimaryBChromaticityX = 0x55D5,
+ kMkvPrimaryBChromaticityY = 0x55D6,
+ kMkvWhitePointChromaticityX = 0x55D7,
+ kMkvWhitePointChromaticityY = 0x55D8,
+ kMkvLuminanceMax = 0x55D9,
+ kMkvLuminanceMin = 0x55DA,
+ // end mastering metadata
+ // end colour
+ // projection
+ kMkvProjection = 0x7670,
+ kMkvProjectionType = 0x7671,
+ kMkvProjectionPrivate = 0x7672,
+ kMkvProjectionPoseYaw = 0x7673,
+ kMkvProjectionPosePitch = 0x7674,
+ kMkvProjectionPoseRoll = 0x7675,
+ // end projection
+ // audio
+ kMkvAudio = 0xE1,
+ kMkvSamplingFrequency = 0xB5,
+ kMkvOutputSamplingFrequency = 0x78B5,
+ kMkvChannels = 0x9F,
+ kMkvBitDepth = 0x6264,
+ // end audio
+ // ContentEncodings
+ kMkvContentEncodings = 0x6D80,
+ kMkvContentEncoding = 0x6240,
+ kMkvContentEncodingOrder = 0x5031,
+ kMkvContentEncodingScope = 0x5032,
+ kMkvContentEncodingType = 0x5033,
+ kMkvContentCompression = 0x5034,
+ kMkvContentCompAlgo = 0x4254,
+ kMkvContentCompSettings = 0x4255,
+ kMkvContentEncryption = 0x5035,
+ kMkvContentEncAlgo = 0x47E1,
+ kMkvContentEncKeyID = 0x47E2,
+ kMkvContentSignature = 0x47E3,
+ kMkvContentSigKeyID = 0x47E4,
+ kMkvContentSigAlgo = 0x47E5,
+ kMkvContentSigHashAlgo = 0x47E6,
+ kMkvContentEncAESSettings = 0x47E7,
+ kMkvAESSettingsCipherMode = 0x47E8,
+ kMkvAESSettingsCipherInitData = 0x47E9,
+ // end ContentEncodings
+ // Cueing Data
+ kMkvCues = 0x1C53BB6B,
+ kMkvCuePoint = 0xBB,
+ kMkvCueTime = 0xB3,
+ kMkvCueTrackPositions = 0xB7,
+ kMkvCueTrack = 0xF7,
+ kMkvCueClusterPosition = 0xF1,
+ kMkvCueBlockNumber = 0x5378,
+ // Chapters
+ kMkvChapters = 0x1043A770,
+ kMkvEditionEntry = 0x45B9,
+ kMkvChapterAtom = 0xB6,
+ kMkvChapterUID = 0x73C4,
+ kMkvChapterStringUID = 0x5654,
+ kMkvChapterTimeStart = 0x91,
+ kMkvChapterTimeEnd = 0x92,
+ kMkvChapterDisplay = 0x80,
+ kMkvChapString = 0x85,
+ kMkvChapLanguage = 0x437C,
+ kMkvChapCountry = 0x437E,
+ // Tags
+ kMkvTags = 0x1254C367,
+ kMkvTag = 0x7373,
+ kMkvSimpleTag = 0x67C8,
+ kMkvTagName = 0x45A3,
+ kMkvTagString = 0x4487
+};
+
+} // namespace libwebm
+
+#endif // COMMON_WEBMIDS_H_
diff --git a/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxer.cc b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxer.cc
new file mode 100644
index 000000000..bae2c99b8
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxer.cc
@@ -0,0 +1,4194 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#include "mkvmuxer/mkvmuxer.h"
+
+#include <stdint.h>
+
+#include <cfloat>
+#include <climits>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <memory>
+#include <new>
+#include <string>
+#include <vector>
+
+#include "common/webmids.h"
+#include "mkvmuxer/mkvmuxerutil.h"
+#include "mkvmuxer/mkvwriter.h"
+#include "mkvparser/mkvparser.h"
+
+namespace mkvmuxer {
+
+const float PrimaryChromaticity::kChromaticityMin = 0.0f;
+const float PrimaryChromaticity::kChromaticityMax = 1.0f;
+const float MasteringMetadata::kMinLuminance = 0.0f;
+const float MasteringMetadata::kMinLuminanceMax = 999.99f;
+const float MasteringMetadata::kMaxLuminanceMax = 9999.99f;
+const float MasteringMetadata::kValueNotPresent = FLT_MAX;
+const uint64_t Colour::kValueNotPresent = UINT64_MAX;
+
+namespace {
+
+const char kDocTypeWebm[] = "webm";
+const char kDocTypeMatroska[] = "matroska";
+
+// Deallocate the string designated by |dst|, and then copy the |src|
+// string to |dst|. The caller owns both the |src| string and the
+// |dst| copy (hence the caller is responsible for eventually
+// deallocating the strings, either directly, or indirectly via
+// StrCpy). Returns true if the source string was successfully copied
+// to the destination.
+bool StrCpy(const char* src, char** dst_ptr) {
+ if (dst_ptr == NULL)
+ return false;
+
+ char*& dst = *dst_ptr;
+
+ delete[] dst;
+ dst = NULL;
+
+ if (src == NULL)
+ return true;
+
+ const size_t size = strlen(src) + 1;
+
+ dst = new (std::nothrow) char[size]; // NOLINT
+ if (dst == NULL)
+ return false;
+
+ strcpy(dst, src); // NOLINT
+ return true;
+}
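+
+// For example:
+//   char* dst = NULL;
+//   StrCpy("webm", &dst);  // dst now owns a heap-allocated copy of "webm"
+//   StrCpy(NULL, &dst);    // frees the copy, resets dst to NULL; returns true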
+
+typedef std::unique_ptr<PrimaryChromaticity> PrimaryChromaticityPtr;
+bool CopyChromaticity(const PrimaryChromaticity* src,
+ PrimaryChromaticityPtr* dst) {
+ if (!dst)
+ return false;
+
+ dst->reset(new (std::nothrow) PrimaryChromaticity(src->x(), src->y()));
+ if (!dst->get())
+ return false;
+
+ return true;
+}
+
+} // namespace
+
+///////////////////////////////////////////////////////////////
+//
+// IMkvWriter Class
+
+IMkvWriter::IMkvWriter() {}
+
+IMkvWriter::~IMkvWriter() {}
+
+bool WriteEbmlHeader(IMkvWriter* writer, uint64_t doc_type_version,
+ const char* const doc_type) {
+ // Level 0
+ uint64_t size =
+ EbmlElementSize(libwebm::kMkvEBMLVersion, static_cast<uint64>(1));
+ size += EbmlElementSize(libwebm::kMkvEBMLReadVersion, static_cast<uint64>(1));
+ size += EbmlElementSize(libwebm::kMkvEBMLMaxIDLength, static_cast<uint64>(4));
+ size +=
+ EbmlElementSize(libwebm::kMkvEBMLMaxSizeLength, static_cast<uint64>(8));
+ size += EbmlElementSize(libwebm::kMkvDocType, doc_type);
+ size += EbmlElementSize(libwebm::kMkvDocTypeVersion,
+ static_cast<uint64>(doc_type_version));
+ size +=
+ EbmlElementSize(libwebm::kMkvDocTypeReadVersion, static_cast<uint64>(2));
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvEBML, size))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvEBMLVersion,
+ static_cast<uint64>(1))) {
+ return false;
+ }
+ if (!WriteEbmlElement(writer, libwebm::kMkvEBMLReadVersion,
+ static_cast<uint64>(1))) {
+ return false;
+ }
+ if (!WriteEbmlElement(writer, libwebm::kMkvEBMLMaxIDLength,
+ static_cast<uint64>(4))) {
+ return false;
+ }
+ if (!WriteEbmlElement(writer, libwebm::kMkvEBMLMaxSizeLength,
+ static_cast<uint64>(8))) {
+ return false;
+ }
+ if (!WriteEbmlElement(writer, libwebm::kMkvDocType, doc_type))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvDocTypeVersion,
+ static_cast<uint64>(doc_type_version))) {
+ return false;
+ }
+ if (!WriteEbmlElement(writer, libwebm::kMkvDocTypeReadVersion,
+ static_cast<uint64>(2))) {
+ return false;
+ }
+
+ return true;
+}
+
+bool WriteEbmlHeader(IMkvWriter* writer, uint64_t doc_type_version) {
+ return WriteEbmlHeader(writer, doc_type_version, kDocTypeWebm);
+}
+
+bool WriteEbmlHeader(IMkvWriter* writer) {
+ return WriteEbmlHeader(writer, mkvmuxer::Segment::kDefaultDocTypeVersion);
+}
+
+bool ChunkedCopy(mkvparser::IMkvReader* source, mkvmuxer::IMkvWriter* dst,
+ int64_t start, int64_t size) {
+ // TODO(vigneshv): Check if this is a reasonable value.
+ const uint32_t kBufSize = 2048;
+ uint8_t* buf = new uint8_t[kBufSize];
+ int64_t offset = start;
+ bool result = true;
+ while (result && size > 0) {
+ const int64_t read_len = (size > kBufSize) ? kBufSize : size;
+ if (source->Read(offset, static_cast<long>(read_len), buf)) {
+ // Read failed; stop copying, but fall through so |buf| is freed.
+ result = false;
+ } else {
+ dst->Write(buf, static_cast<uint32_t>(read_len));
+ offset += read_len;
+ size -= read_len;
+ }
+ }
+ delete[] buf;
+ return result;
+}
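+
+// Usage sketch: ChunkedCopy(&reader, &writer, payload_start, payload_size)
+// streams the range through a fixed 2048-byte buffer rather than reading
+// the whole payload into memory at once.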
+
+///////////////////////////////////////////////////////////////
+//
+// Frame Class
+
+Frame::Frame()
+ : add_id_(0),
+ additional_(NULL),
+ additional_length_(0),
+ duration_(0),
+ duration_set_(false),
+ frame_(NULL),
+ is_key_(false),
+ length_(0),
+ track_number_(0),
+ timestamp_(0),
+ discard_padding_(0),
+ reference_block_timestamp_(0),
+ reference_block_timestamp_set_(false) {}
+
+Frame::~Frame() {
+ delete[] frame_;
+ delete[] additional_;
+}
+
+bool Frame::CopyFrom(const Frame& frame) {
+ delete[] frame_;
+ frame_ = NULL;
+ length_ = 0;
+ if (frame.length() > 0 && frame.frame() != NULL &&
+ !Init(frame.frame(), frame.length())) {
+ return false;
+ }
+ add_id_ = 0;
+ delete[] additional_;
+ additional_ = NULL;
+ additional_length_ = 0;
+ if (frame.additional_length() > 0 && frame.additional() != NULL &&
+ !AddAdditionalData(frame.additional(), frame.additional_length(),
+ frame.add_id())) {
+ return false;
+ }
+ duration_ = frame.duration();
+ duration_set_ = frame.duration_set();
+ is_key_ = frame.is_key();
+ track_number_ = frame.track_number();
+ timestamp_ = frame.timestamp();
+ discard_padding_ = frame.discard_padding();
+ reference_block_timestamp_ = frame.reference_block_timestamp();
+ reference_block_timestamp_set_ = frame.reference_block_timestamp_set();
+ return true;
+}
+
+bool Frame::Init(const uint8_t* frame, uint64_t length) {
+ uint8_t* const data =
+ new (std::nothrow) uint8_t[static_cast<size_t>(length)]; // NOLINT
+ if (!data)
+ return false;
+
+ delete[] frame_;
+ frame_ = data;
+ length_ = length;
+
+ memcpy(frame_, frame, static_cast<size_t>(length_));
+ return true;
+}
+
+bool Frame::AddAdditionalData(const uint8_t* additional, uint64_t length,
+ uint64_t add_id) {
+ uint8_t* const data =
+ new (std::nothrow) uint8_t[static_cast<size_t>(length)]; // NOLINT
+ if (!data)
+ return false;
+
+ delete[] additional_;
+ additional_ = data;
+ additional_length_ = length;
+ add_id_ = add_id;
+
+ memcpy(additional_, additional, static_cast<size_t>(additional_length_));
+ return true;
+}
+
+bool Frame::IsValid() const {
+ if (length_ == 0 || !frame_) {
+ return false;
+ }
+ if ((additional_length_ != 0 && !additional_) ||
+ (additional_ != NULL && additional_length_ == 0)) {
+ return false;
+ }
+ if (track_number_ == 0 || track_number_ > kMaxTrackNumber) {
+ return false;
+ }
+ if (!CanBeSimpleBlock() && !is_key_ && !reference_block_timestamp_set_) {
+ return false;
+ }
+ return true;
+}
+
+bool Frame::CanBeSimpleBlock() const {
+ return additional_ == NULL && discard_padding_ == 0 && duration_ == 0;
+}
+
+void Frame::set_duration(uint64_t duration) {
+ duration_ = duration;
+ duration_set_ = true;
+}
+
+void Frame::set_reference_block_timestamp(int64_t reference_block_timestamp) {
+ reference_block_timestamp_ = reference_block_timestamp;
+ reference_block_timestamp_set_ = true;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// CuePoint Class
+
+CuePoint::CuePoint()
+ : time_(0),
+ track_(0),
+ cluster_pos_(0),
+ block_number_(1),
+ output_block_number_(true) {}
+
+CuePoint::~CuePoint() {}
+
+bool CuePoint::Write(IMkvWriter* writer) const {
+ if (!writer || track_ < 1 || cluster_pos_ < 1)
+ return false;
+
+ uint64_t size = EbmlElementSize(libwebm::kMkvCueClusterPosition,
+ static_cast<uint64>(cluster_pos_));
+ size += EbmlElementSize(libwebm::kMkvCueTrack, static_cast<uint64>(track_));
+ if (output_block_number_ && block_number_ > 1)
+ size += EbmlElementSize(libwebm::kMkvCueBlockNumber,
+ static_cast<uint64>(block_number_));
+ const uint64_t track_pos_size =
+ EbmlMasterElementSize(libwebm::kMkvCueTrackPositions, size) + size;
+ const uint64_t payload_size =
+ EbmlElementSize(libwebm::kMkvCueTime, static_cast<uint64>(time_)) +
+ track_pos_size;
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvCuePoint, payload_size))
+ return false;
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvCueTime,
+ static_cast<uint64>(time_))) {
+ return false;
+ }
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvCueTrackPositions, size))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvCueTrack,
+ static_cast<uint64>(track_))) {
+ return false;
+ }
+ if (!WriteEbmlElement(writer, libwebm::kMkvCueClusterPosition,
+ static_cast<uint64>(cluster_pos_))) {
+ return false;
+ }
+ if (output_block_number_ && block_number_ > 1) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvCueBlockNumber,
+ static_cast<uint64>(block_number_))) {
+ return false;
+ }
+ }
+
+ const int64_t stop_position = writer->Position();
+ if (stop_position < 0)
+ return false;
+
+ if (stop_position - payload_position != static_cast<int64_t>(payload_size))
+ return false;
+
+ return true;
+}
+
+uint64_t CuePoint::PayloadSize() const {
+ uint64_t size = EbmlElementSize(libwebm::kMkvCueClusterPosition,
+ static_cast<uint64>(cluster_pos_));
+ size += EbmlElementSize(libwebm::kMkvCueTrack, static_cast<uint64>(track_));
+ if (output_block_number_ && block_number_ > 1)
+ size += EbmlElementSize(libwebm::kMkvCueBlockNumber,
+ static_cast<uint64>(block_number_));
+ const uint64_t track_pos_size =
+ EbmlMasterElementSize(libwebm::kMkvCueTrackPositions, size) + size;
+ const uint64_t payload_size =
+ EbmlElementSize(libwebm::kMkvCueTime, static_cast<uint64>(time_)) +
+ track_pos_size;
+
+ return payload_size;
+}
+
+uint64_t CuePoint::Size() const {
+ const uint64_t payload_size = PayloadSize();
+ return EbmlMasterElementSize(libwebm::kMkvCuePoint, payload_size) +
+ payload_size;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Cues Class
+
+Cues::Cues()
+ : cue_entries_capacity_(0),
+ cue_entries_size_(0),
+ cue_entries_(NULL),
+ output_block_number_(true) {}
+
+Cues::~Cues() {
+ if (cue_entries_) {
+ for (int32_t i = 0; i < cue_entries_size_; ++i) {
+ CuePoint* const cue = cue_entries_[i];
+ delete cue;
+ }
+ delete[] cue_entries_;
+ }
+}
+
+bool Cues::AddCue(CuePoint* cue) {
+ if (!cue)
+ return false;
+
+ if ((cue_entries_size_ + 1) > cue_entries_capacity_) {
+    // Grow the array of CuePoint pointers.
+ const int32_t new_capacity =
+ (!cue_entries_capacity_) ? 2 : cue_entries_capacity_ * 2;
+
+ if (new_capacity < 1)
+ return false;
+
+ CuePoint** const cues =
+ new (std::nothrow) CuePoint*[new_capacity]; // NOLINT
+ if (!cues)
+ return false;
+
+ for (int32_t i = 0; i < cue_entries_size_; ++i) {
+ cues[i] = cue_entries_[i];
+ }
+
+ delete[] cue_entries_;
+
+ cue_entries_ = cues;
+ cue_entries_capacity_ = new_capacity;
+ }
+
+ cue->set_output_block_number(output_block_number_);
+ cue_entries_[cue_entries_size_++] = cue;
+ return true;
+}
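+
+// Usage sketch (illustrative only; |cues| and |cue| are hypothetical names):
+// a CuePoint is heap-allocated by the caller and owned by Cues once AddCue()
+// succeeds:
+//
+//   CuePoint* const cue = new (std::nothrow) CuePoint();
+//   if (cue && !cues.AddCue(cue))
+//     delete cue;  // AddCue() failed, so ownership was not transferred.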
+
+CuePoint* Cues::GetCueByIndex(int32_t index) const {
+ if (cue_entries_ == NULL)
+ return NULL;
+
+  if (index < 0 || index >= cue_entries_size_)
+    return NULL;
+
+ return cue_entries_[index];
+}
+
+uint64_t Cues::Size() {
+ uint64_t size = 0;
+ for (int32_t i = 0; i < cue_entries_size_; ++i)
+ size += GetCueByIndex(i)->Size();
+ size += EbmlMasterElementSize(libwebm::kMkvCues, size);
+ return size;
+}
+
+bool Cues::Write(IMkvWriter* writer) const {
+ if (!writer)
+ return false;
+
+ uint64_t size = 0;
+ for (int32_t i = 0; i < cue_entries_size_; ++i) {
+ const CuePoint* const cue = GetCueByIndex(i);
+
+ if (!cue)
+ return false;
+
+ size += cue->Size();
+ }
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvCues, size))
+ return false;
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ for (int32_t i = 0; i < cue_entries_size_; ++i) {
+ const CuePoint* const cue = GetCueByIndex(i);
+
+ if (!cue->Write(writer))
+ return false;
+ }
+
+ const int64_t stop_position = writer->Position();
+ if (stop_position < 0)
+ return false;
+
+ if (stop_position - payload_position != static_cast<int64_t>(size))
+ return false;
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// ContentEncAESSettings Class
+
+ContentEncAESSettings::ContentEncAESSettings() : cipher_mode_(kCTR) {}
+
+uint64_t ContentEncAESSettings::Size() const {
+ const uint64_t payload = PayloadSize();
+ const uint64_t size =
+ EbmlMasterElementSize(libwebm::kMkvContentEncAESSettings, payload) +
+ payload;
+ return size;
+}
+
+bool ContentEncAESSettings::Write(IMkvWriter* writer) const {
+ const uint64_t payload = PayloadSize();
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvContentEncAESSettings,
+ payload))
+ return false;
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvAESSettingsCipherMode,
+ static_cast<uint64>(cipher_mode_))) {
+ return false;
+ }
+
+ const int64_t stop_position = writer->Position();
+ if (stop_position < 0 ||
+ stop_position - payload_position != static_cast<int64_t>(payload))
+ return false;
+
+ return true;
+}
+
+uint64_t ContentEncAESSettings::PayloadSize() const {
+ uint64_t size = EbmlElementSize(libwebm::kMkvAESSettingsCipherMode,
+ static_cast<uint64>(cipher_mode_));
+ return size;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// ContentEncoding Class
+
+ContentEncoding::ContentEncoding()
+    : enc_algo_(5),  // ContentEncAlgo: 5 = AES.
+ enc_key_id_(NULL),
+ encoding_order_(0),
+ encoding_scope_(1),
+ encoding_type_(1),
+ enc_key_id_length_(0) {}
+
+ContentEncoding::~ContentEncoding() { delete[] enc_key_id_; }
+
+bool ContentEncoding::SetEncryptionID(const uint8_t* id, uint64_t length) {
+ if (!id || length < 1)
+ return false;
+
+ delete[] enc_key_id_;
+
+ enc_key_id_ =
+ new (std::nothrow) uint8_t[static_cast<size_t>(length)]; // NOLINT
+ if (!enc_key_id_)
+ return false;
+
+ memcpy(enc_key_id_, id, static_cast<size_t>(length));
+ enc_key_id_length_ = length;
+
+ return true;
+}
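+
+// Usage sketch (illustrative only; |encoding| is a hypothetical pointer): the
+// key ID is copied into the encoding, so the caller keeps ownership of its
+// buffer:
+//
+//   const uint8_t key_id[] = {0x01, 0x02, 0x03, 0x04};  // example bytes
+//   if (!encoding->SetEncryptionID(key_id, sizeof(key_id)))
+//     return false;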
+
+uint64_t ContentEncoding::Size() const {
+ const uint64_t encryption_size = EncryptionSize();
+ const uint64_t encoding_size = EncodingSize(0, encryption_size);
+ const uint64_t encodings_size =
+ EbmlMasterElementSize(libwebm::kMkvContentEncoding, encoding_size) +
+ encoding_size;
+
+ return encodings_size;
+}
+
+bool ContentEncoding::Write(IMkvWriter* writer) const {
+ const uint64_t encryption_size = EncryptionSize();
+ const uint64_t encoding_size = EncodingSize(0, encryption_size);
+ const uint64_t size =
+ EbmlMasterElementSize(libwebm::kMkvContentEncoding, encoding_size) +
+ encoding_size;
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvContentEncoding,
+ encoding_size))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvContentEncodingOrder,
+ static_cast<uint64>(encoding_order_)))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvContentEncodingScope,
+ static_cast<uint64>(encoding_scope_)))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvContentEncodingType,
+ static_cast<uint64>(encoding_type_)))
+ return false;
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvContentEncryption,
+ encryption_size))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvContentEncAlgo,
+ static_cast<uint64>(enc_algo_))) {
+ return false;
+ }
+ if (!WriteEbmlElement(writer, libwebm::kMkvContentEncKeyID, enc_key_id_,
+ enc_key_id_length_))
+ return false;
+
+ if (!enc_aes_settings_.Write(writer))
+ return false;
+
+ const int64_t stop_position = writer->Position();
+ if (stop_position < 0 ||
+ stop_position - payload_position != static_cast<int64_t>(size))
+ return false;
+
+ return true;
+}
+
+uint64_t ContentEncoding::EncodingSize(uint64_t compression_size,
+                                       uint64_t encryption_size) const {
+  // TODO(fgalligan): Add support for compression settings.
+  if (compression_size != 0)
+ return 0;
+
+ uint64_t encoding_size = 0;
+
+ if (encryption_size > 0) {
+ encoding_size +=
+ EbmlMasterElementSize(libwebm::kMkvContentEncryption, encryption_size) +
+ encryption_size;
+ }
+ encoding_size += EbmlElementSize(libwebm::kMkvContentEncodingType,
+ static_cast<uint64>(encoding_type_));
+ encoding_size += EbmlElementSize(libwebm::kMkvContentEncodingScope,
+ static_cast<uint64>(encoding_scope_));
+ encoding_size += EbmlElementSize(libwebm::kMkvContentEncodingOrder,
+ static_cast<uint64>(encoding_order_));
+
+ return encoding_size;
+}
+
+uint64_t ContentEncoding::EncryptionSize() const {
+ const uint64_t aes_size = enc_aes_settings_.Size();
+
+ uint64_t encryption_size = EbmlElementSize(libwebm::kMkvContentEncKeyID,
+ enc_key_id_, enc_key_id_length_);
+ encryption_size += EbmlElementSize(libwebm::kMkvContentEncAlgo,
+ static_cast<uint64>(enc_algo_));
+
+ return encryption_size + aes_size;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Track Class
+
+Track::Track(unsigned int* seed)
+ : codec_id_(NULL),
+ codec_private_(NULL),
+ language_(NULL),
+ max_block_additional_id_(0),
+ name_(NULL),
+ number_(0),
+ type_(0),
+ uid_(MakeUID(seed)),
+ codec_delay_(0),
+ seek_pre_roll_(0),
+ default_duration_(0),
+ codec_private_length_(0),
+ content_encoding_entries_(NULL),
+ content_encoding_entries_size_(0) {}
+
+Track::~Track() {
+ delete[] codec_id_;
+ delete[] codec_private_;
+ delete[] language_;
+ delete[] name_;
+
+ if (content_encoding_entries_) {
+ for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) {
+ ContentEncoding* const encoding = content_encoding_entries_[i];
+ delete encoding;
+ }
+ delete[] content_encoding_entries_;
+ }
+}
+
+bool Track::AddContentEncoding() {
+ const uint32_t count = content_encoding_entries_size_ + 1;
+
+ ContentEncoding** const content_encoding_entries =
+ new (std::nothrow) ContentEncoding*[count]; // NOLINT
+ if (!content_encoding_entries)
+ return false;
+
+ ContentEncoding* const content_encoding =
+ new (std::nothrow) ContentEncoding(); // NOLINT
+ if (!content_encoding) {
+ delete[] content_encoding_entries;
+ return false;
+ }
+
+ for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) {
+ content_encoding_entries[i] = content_encoding_entries_[i];
+ }
+
+ delete[] content_encoding_entries_;
+
+ content_encoding_entries_ = content_encoding_entries;
+ content_encoding_entries_[content_encoding_entries_size_] = content_encoding;
+ content_encoding_entries_size_ = count;
+ return true;
+}
+
+ContentEncoding* Track::GetContentEncodingByIndex(uint32_t index) const {
+ if (content_encoding_entries_ == NULL)
+ return NULL;
+
+ if (index >= content_encoding_entries_size_)
+ return NULL;
+
+ return content_encoding_entries_[index];
+}
+
+uint64_t Track::PayloadSize() const {
+ uint64_t size =
+ EbmlElementSize(libwebm::kMkvTrackNumber, static_cast<uint64>(number_));
+ size += EbmlElementSize(libwebm::kMkvTrackUID, static_cast<uint64>(uid_));
+ size += EbmlElementSize(libwebm::kMkvTrackType, static_cast<uint64>(type_));
+ if (codec_id_)
+ size += EbmlElementSize(libwebm::kMkvCodecID, codec_id_);
+ if (codec_private_)
+ size += EbmlElementSize(libwebm::kMkvCodecPrivate, codec_private_,
+ codec_private_length_);
+ if (language_)
+ size += EbmlElementSize(libwebm::kMkvLanguage, language_);
+ if (name_)
+ size += EbmlElementSize(libwebm::kMkvName, name_);
+ if (max_block_additional_id_) {
+ size += EbmlElementSize(libwebm::kMkvMaxBlockAdditionID,
+ static_cast<uint64>(max_block_additional_id_));
+ }
+ if (codec_delay_) {
+ size += EbmlElementSize(libwebm::kMkvCodecDelay,
+ static_cast<uint64>(codec_delay_));
+ }
+ if (seek_pre_roll_) {
+ size += EbmlElementSize(libwebm::kMkvSeekPreRoll,
+ static_cast<uint64>(seek_pre_roll_));
+ }
+ if (default_duration_) {
+ size += EbmlElementSize(libwebm::kMkvDefaultDuration,
+ static_cast<uint64>(default_duration_));
+ }
+
+ if (content_encoding_entries_size_ > 0) {
+ uint64_t content_encodings_size = 0;
+ for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) {
+ ContentEncoding* const encoding = content_encoding_entries_[i];
+ content_encodings_size += encoding->Size();
+ }
+
+ size += EbmlMasterElementSize(libwebm::kMkvContentEncodings,
+ content_encodings_size) +
+ content_encodings_size;
+ }
+
+ return size;
+}
+
+uint64_t Track::Size() const {
+ uint64_t size = PayloadSize();
+ size += EbmlMasterElementSize(libwebm::kMkvTrackEntry, size);
+ return size;
+}
+
+bool Track::Write(IMkvWriter* writer) const {
+ if (!writer)
+ return false;
+
+  // Mandatory elements without a default value.
+ if (!type_ || !codec_id_)
+ return false;
+
+  // |payload_size| may be bigger than what is written out in this function
+  // because derived classes may write out more data in the Track element.
+ const uint64_t payload_size = PayloadSize();
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvTrackEntry, payload_size))
+ return false;
+
+ uint64_t size =
+ EbmlElementSize(libwebm::kMkvTrackNumber, static_cast<uint64>(number_));
+ size += EbmlElementSize(libwebm::kMkvTrackUID, static_cast<uint64>(uid_));
+ size += EbmlElementSize(libwebm::kMkvTrackType, static_cast<uint64>(type_));
+ if (codec_id_)
+ size += EbmlElementSize(libwebm::kMkvCodecID, codec_id_);
+ if (codec_private_)
+ size += EbmlElementSize(libwebm::kMkvCodecPrivate, codec_private_,
+ static_cast<uint64>(codec_private_length_));
+ if (language_)
+ size += EbmlElementSize(libwebm::kMkvLanguage, language_);
+ if (name_)
+ size += EbmlElementSize(libwebm::kMkvName, name_);
+ if (max_block_additional_id_)
+ size += EbmlElementSize(libwebm::kMkvMaxBlockAdditionID,
+ static_cast<uint64>(max_block_additional_id_));
+ if (codec_delay_)
+ size += EbmlElementSize(libwebm::kMkvCodecDelay,
+ static_cast<uint64>(codec_delay_));
+ if (seek_pre_roll_)
+ size += EbmlElementSize(libwebm::kMkvSeekPreRoll,
+ static_cast<uint64>(seek_pre_roll_));
+ if (default_duration_)
+ size += EbmlElementSize(libwebm::kMkvDefaultDuration,
+ static_cast<uint64>(default_duration_));
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvTrackNumber,
+ static_cast<uint64>(number_)))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvTrackUID,
+ static_cast<uint64>(uid_)))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvTrackType,
+ static_cast<uint64>(type_)))
+ return false;
+ if (max_block_additional_id_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvMaxBlockAdditionID,
+ static_cast<uint64>(max_block_additional_id_))) {
+ return false;
+ }
+ }
+ if (codec_delay_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvCodecDelay,
+ static_cast<uint64>(codec_delay_)))
+ return false;
+ }
+ if (seek_pre_roll_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvSeekPreRoll,
+ static_cast<uint64>(seek_pre_roll_)))
+ return false;
+ }
+ if (default_duration_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvDefaultDuration,
+ static_cast<uint64>(default_duration_)))
+ return false;
+ }
+ if (codec_id_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvCodecID, codec_id_))
+ return false;
+ }
+ if (codec_private_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvCodecPrivate, codec_private_,
+ static_cast<uint64>(codec_private_length_)))
+ return false;
+ }
+ if (language_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvLanguage, language_))
+ return false;
+ }
+ if (name_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvName, name_))
+ return false;
+ }
+
+ int64_t stop_position = writer->Position();
+ if (stop_position < 0 ||
+ stop_position - payload_position != static_cast<int64_t>(size))
+ return false;
+
+ if (content_encoding_entries_size_ > 0) {
+ uint64_t content_encodings_size = 0;
+ for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) {
+ ContentEncoding* const encoding = content_encoding_entries_[i];
+ content_encodings_size += encoding->Size();
+ }
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvContentEncodings,
+ content_encodings_size))
+ return false;
+
+ for (uint32_t i = 0; i < content_encoding_entries_size_; ++i) {
+ ContentEncoding* const encoding = content_encoding_entries_[i];
+ if (!encoding->Write(writer))
+ return false;
+ }
+ }
+
+ stop_position = writer->Position();
+ if (stop_position < 0)
+ return false;
+ return true;
+}
+
+bool Track::SetCodecPrivate(const uint8_t* codec_private, uint64_t length) {
+ if (!codec_private || length < 1)
+ return false;
+
+ delete[] codec_private_;
+
+ codec_private_ =
+ new (std::nothrow) uint8_t[static_cast<size_t>(length)]; // NOLINT
+ if (!codec_private_)
+ return false;
+
+ memcpy(codec_private_, codec_private, static_cast<size_t>(length));
+ codec_private_length_ = length;
+
+ return true;
+}
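+
+// Usage sketch (illustrative only; names are hypothetical): codec private
+// data is deep-copied as well, so a transient buffer (e.g. Opus or Vorbis
+// setup headers) is fine:
+//
+//   track->SetCodecPrivate(header_bytes, header_length);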
+
+void Track::set_codec_id(const char* codec_id) {
+ if (codec_id) {
+ delete[] codec_id_;
+
+ const size_t length = strlen(codec_id) + 1;
+ codec_id_ = new (std::nothrow) char[length]; // NOLINT
+ if (codec_id_) {
+#ifdef _MSC_VER
+ strcpy_s(codec_id_, length, codec_id);
+#else
+ strcpy(codec_id_, codec_id);
+#endif
+ }
+ }
+}
+
+// TODO(fgalligan): Vet the language parameter.
+void Track::set_language(const char* language) {
+ if (language) {
+ delete[] language_;
+
+ const size_t length = strlen(language) + 1;
+ language_ = new (std::nothrow) char[length]; // NOLINT
+ if (language_) {
+#ifdef _MSC_VER
+ strcpy_s(language_, length, language);
+#else
+ strcpy(language_, language);
+#endif
+ }
+ }
+}
+
+void Track::set_name(const char* name) {
+ if (name) {
+ delete[] name_;
+
+ const size_t length = strlen(name) + 1;
+ name_ = new (std::nothrow) char[length]; // NOLINT
+ if (name_) {
+#ifdef _MSC_VER
+ strcpy_s(name_, length, name);
+#else
+ strcpy(name_, name);
+#endif
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Colour and its child elements
+
+uint64_t PrimaryChromaticity::PrimaryChromaticitySize(
+ libwebm::MkvId x_id, libwebm::MkvId y_id) const {
+ return EbmlElementSize(x_id, x_) + EbmlElementSize(y_id, y_);
+}
+
+bool PrimaryChromaticity::Write(IMkvWriter* writer, libwebm::MkvId x_id,
+ libwebm::MkvId y_id) const {
+ if (!Valid()) {
+ return false;
+ }
+ return WriteEbmlElement(writer, x_id, x_) &&
+ WriteEbmlElement(writer, y_id, y_);
+}
+
+bool PrimaryChromaticity::Valid() const {
+ return (x_ >= kChromaticityMin && x_ <= kChromaticityMax &&
+ y_ >= kChromaticityMin && y_ <= kChromaticityMax);
+}
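+
+// Example (annotation, not upstream code): assuming the usual [0, 1] bounds
+// for kChromaticityMin/kChromaticityMax, the BT.709 red primary at roughly
+// (x, y) = (0.64, 0.33) passes Valid(), while a garbage value such as
+// (1.5, -0.2) is rejected.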
+
+uint64_t MasteringMetadata::MasteringMetadataSize() const {
+ uint64_t size = PayloadSize();
+
+ if (size > 0)
+ size += EbmlMasterElementSize(libwebm::kMkvMasteringMetadata, size);
+
+ return size;
+}
+
+bool MasteringMetadata::Valid() const {
+ if (luminance_min_ != kValueNotPresent) {
+ if (luminance_min_ < kMinLuminance || luminance_min_ > kMinLuminanceMax ||
+ luminance_min_ > luminance_max_) {
+ return false;
+ }
+ }
+ if (luminance_max_ != kValueNotPresent) {
+ if (luminance_max_ < kMinLuminance || luminance_max_ > kMaxLuminanceMax ||
+ luminance_max_ < luminance_min_) {
+ return false;
+ }
+ }
+ if (r_ && !r_->Valid())
+ return false;
+ if (g_ && !g_->Valid())
+ return false;
+ if (b_ && !b_->Valid())
+ return false;
+ if (white_point_ && !white_point_->Valid())
+ return false;
+
+ return true;
+}
+
+bool MasteringMetadata::Write(IMkvWriter* writer) const {
+ const uint64_t size = PayloadSize();
+
+ // Don't write an empty element.
+ if (size == 0)
+ return true;
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvMasteringMetadata, size))
+ return false;
+ if (luminance_max_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvLuminanceMax, luminance_max_)) {
+ return false;
+ }
+ if (luminance_min_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvLuminanceMin, luminance_min_)) {
+ return false;
+ }
+ if (r_ &&
+ !r_->Write(writer, libwebm::kMkvPrimaryRChromaticityX,
+ libwebm::kMkvPrimaryRChromaticityY)) {
+ return false;
+ }
+ if (g_ &&
+ !g_->Write(writer, libwebm::kMkvPrimaryGChromaticityX,
+ libwebm::kMkvPrimaryGChromaticityY)) {
+ return false;
+ }
+ if (b_ &&
+ !b_->Write(writer, libwebm::kMkvPrimaryBChromaticityX,
+ libwebm::kMkvPrimaryBChromaticityY)) {
+ return false;
+ }
+ if (white_point_ &&
+ !white_point_->Write(writer, libwebm::kMkvWhitePointChromaticityX,
+ libwebm::kMkvWhitePointChromaticityY)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool MasteringMetadata::SetChromaticity(
+ const PrimaryChromaticity* r, const PrimaryChromaticity* g,
+ const PrimaryChromaticity* b, const PrimaryChromaticity* white_point) {
+ PrimaryChromaticityPtr r_ptr(nullptr);
+ if (r) {
+ if (!CopyChromaticity(r, &r_ptr))
+ return false;
+ }
+ PrimaryChromaticityPtr g_ptr(nullptr);
+ if (g) {
+ if (!CopyChromaticity(g, &g_ptr))
+ return false;
+ }
+ PrimaryChromaticityPtr b_ptr(nullptr);
+ if (b) {
+ if (!CopyChromaticity(b, &b_ptr))
+ return false;
+ }
+ PrimaryChromaticityPtr wp_ptr(nullptr);
+ if (white_point) {
+ if (!CopyChromaticity(white_point, &wp_ptr))
+ return false;
+ }
+
+  // Release any previously set chromaticities before taking ownership of the
+  // new copies; otherwise repeated calls would leak them.
+  delete r_;
+  r_ = r_ptr.release();
+  delete g_;
+  g_ = g_ptr.release();
+  delete b_;
+  b_ = b_ptr.release();
+  delete white_point_;
+  white_point_ = wp_ptr.release();
+ return true;
+}
+
+uint64_t MasteringMetadata::PayloadSize() const {
+ uint64_t size = 0;
+
+ if (luminance_max_ != kValueNotPresent)
+ size += EbmlElementSize(libwebm::kMkvLuminanceMax, luminance_max_);
+ if (luminance_min_ != kValueNotPresent)
+ size += EbmlElementSize(libwebm::kMkvLuminanceMin, luminance_min_);
+
+ if (r_) {
+ size += r_->PrimaryChromaticitySize(libwebm::kMkvPrimaryRChromaticityX,
+ libwebm::kMkvPrimaryRChromaticityY);
+ }
+ if (g_) {
+ size += g_->PrimaryChromaticitySize(libwebm::kMkvPrimaryGChromaticityX,
+ libwebm::kMkvPrimaryGChromaticityY);
+ }
+ if (b_) {
+ size += b_->PrimaryChromaticitySize(libwebm::kMkvPrimaryBChromaticityX,
+ libwebm::kMkvPrimaryBChromaticityY);
+ }
+ if (white_point_) {
+ size += white_point_->PrimaryChromaticitySize(
+ libwebm::kMkvWhitePointChromaticityX,
+ libwebm::kMkvWhitePointChromaticityY);
+ }
+
+ return size;
+}
+
+uint64_t Colour::ColourSize() const {
+ uint64_t size = PayloadSize();
+
+ if (size > 0)
+ size += EbmlMasterElementSize(libwebm::kMkvColour, size);
+
+ return size;
+}
+
+bool Colour::Valid() const {
+ if (mastering_metadata_ && !mastering_metadata_->Valid())
+ return false;
+ if (matrix_coefficients_ != kValueNotPresent &&
+ !IsMatrixCoefficientsValueValid(matrix_coefficients_)) {
+ return false;
+ }
+ if (chroma_siting_horz_ != kValueNotPresent &&
+ !IsChromaSitingHorzValueValid(chroma_siting_horz_)) {
+ return false;
+ }
+ if (chroma_siting_vert_ != kValueNotPresent &&
+ !IsChromaSitingVertValueValid(chroma_siting_vert_)) {
+ return false;
+ }
+ if (range_ != kValueNotPresent && !IsColourRangeValueValid(range_))
+ return false;
+ if (transfer_characteristics_ != kValueNotPresent &&
+ !IsTransferCharacteristicsValueValid(transfer_characteristics_)) {
+ return false;
+ }
+ if (primaries_ != kValueNotPresent && !IsPrimariesValueValid(primaries_))
+ return false;
+
+ return true;
+}
+
+bool Colour::Write(IMkvWriter* writer) const {
+ const uint64_t size = PayloadSize();
+
+ // Don't write an empty element.
+ if (size == 0)
+ return true;
+
+ // Don't write an invalid element.
+ if (!Valid())
+ return false;
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvColour, size))
+ return false;
+
+ if (matrix_coefficients_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvMatrixCoefficients,
+ static_cast<uint64>(matrix_coefficients_))) {
+ return false;
+ }
+ if (bits_per_channel_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvBitsPerChannel,
+ static_cast<uint64>(bits_per_channel_))) {
+ return false;
+ }
+ if (chroma_subsampling_horz_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvChromaSubsamplingHorz,
+ static_cast<uint64>(chroma_subsampling_horz_))) {
+ return false;
+ }
+ if (chroma_subsampling_vert_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvChromaSubsamplingVert,
+ static_cast<uint64>(chroma_subsampling_vert_))) {
+ return false;
+ }
+
+ if (cb_subsampling_horz_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvCbSubsamplingHorz,
+ static_cast<uint64>(cb_subsampling_horz_))) {
+ return false;
+ }
+ if (cb_subsampling_vert_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvCbSubsamplingVert,
+ static_cast<uint64>(cb_subsampling_vert_))) {
+ return false;
+ }
+ if (chroma_siting_horz_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvChromaSitingHorz,
+ static_cast<uint64>(chroma_siting_horz_))) {
+ return false;
+ }
+ if (chroma_siting_vert_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvChromaSitingVert,
+ static_cast<uint64>(chroma_siting_vert_))) {
+ return false;
+ }
+ if (range_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvRange,
+ static_cast<uint64>(range_))) {
+ return false;
+ }
+ if (transfer_characteristics_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvTransferCharacteristics,
+ static_cast<uint64>(transfer_characteristics_))) {
+ return false;
+ }
+ if (primaries_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvPrimaries,
+ static_cast<uint64>(primaries_))) {
+ return false;
+ }
+ if (max_cll_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvMaxCLL,
+ static_cast<uint64>(max_cll_))) {
+ return false;
+ }
+ if (max_fall_ != kValueNotPresent &&
+ !WriteEbmlElement(writer, libwebm::kMkvMaxFALL,
+ static_cast<uint64>(max_fall_))) {
+ return false;
+ }
+
+ if (mastering_metadata_ && !mastering_metadata_->Write(writer))
+ return false;
+
+ return true;
+}
+
+bool Colour::SetMasteringMetadata(const MasteringMetadata& mastering_metadata) {
+ std::unique_ptr<MasteringMetadata> mm_ptr(new MasteringMetadata());
+ if (!mm_ptr.get())
+ return false;
+
+ mm_ptr->set_luminance_max(mastering_metadata.luminance_max());
+ mm_ptr->set_luminance_min(mastering_metadata.luminance_min());
+
+ if (!mm_ptr->SetChromaticity(mastering_metadata.r(), mastering_metadata.g(),
+ mastering_metadata.b(),
+ mastering_metadata.white_point())) {
+ return false;
+ }
+
+ delete mastering_metadata_;
+ mastering_metadata_ = mm_ptr.release();
+ return true;
+}
+
+uint64_t Colour::PayloadSize() const {
+ uint64_t size = 0;
+
+ if (matrix_coefficients_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvMatrixCoefficients,
+ static_cast<uint64>(matrix_coefficients_));
+ }
+ if (bits_per_channel_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvBitsPerChannel,
+ static_cast<uint64>(bits_per_channel_));
+ }
+ if (chroma_subsampling_horz_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvChromaSubsamplingHorz,
+ static_cast<uint64>(chroma_subsampling_horz_));
+ }
+ if (chroma_subsampling_vert_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvChromaSubsamplingVert,
+ static_cast<uint64>(chroma_subsampling_vert_));
+ }
+ if (cb_subsampling_horz_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvCbSubsamplingHorz,
+ static_cast<uint64>(cb_subsampling_horz_));
+ }
+ if (cb_subsampling_vert_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvCbSubsamplingVert,
+ static_cast<uint64>(cb_subsampling_vert_));
+ }
+ if (chroma_siting_horz_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvChromaSitingHorz,
+ static_cast<uint64>(chroma_siting_horz_));
+ }
+ if (chroma_siting_vert_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvChromaSitingVert,
+ static_cast<uint64>(chroma_siting_vert_));
+ }
+ if (range_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvRange, static_cast<uint64>(range_));
+ }
+ if (transfer_characteristics_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvTransferCharacteristics,
+ static_cast<uint64>(transfer_characteristics_));
+ }
+ if (primaries_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvPrimaries,
+ static_cast<uint64>(primaries_));
+ }
+ if (max_cll_ != kValueNotPresent) {
+ size += EbmlElementSize(libwebm::kMkvMaxCLL, static_cast<uint64>(max_cll_));
+ }
+ if (max_fall_ != kValueNotPresent) {
+ size +=
+ EbmlElementSize(libwebm::kMkvMaxFALL, static_cast<uint64>(max_fall_));
+ }
+
+ if (mastering_metadata_)
+ size += mastering_metadata_->MasteringMetadataSize();
+
+ return size;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Projection element
+
+uint64_t Projection::ProjectionSize() const {
+ uint64_t size = PayloadSize();
+
+ if (size > 0)
+ size += EbmlMasterElementSize(libwebm::kMkvProjection, size);
+
+ return size;
+}
+
+bool Projection::Write(IMkvWriter* writer) const {
+ const uint64_t size = PayloadSize();
+
+ // Don't write an empty element.
+ if (size == 0)
+ return true;
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvProjection, size))
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvProjectionType,
+ static_cast<uint64>(type_))) {
+ return false;
+ }
+
+ if (private_data_length_ > 0 && private_data_ != NULL &&
+ !WriteEbmlElement(writer, libwebm::kMkvProjectionPrivate, private_data_,
+ private_data_length_)) {
+ return false;
+ }
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvProjectionPoseYaw, pose_yaw_))
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvProjectionPosePitch,
+ pose_pitch_)) {
+ return false;
+ }
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvProjectionPoseRoll, pose_roll_)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool Projection::SetProjectionPrivate(const uint8_t* data,
+ uint64_t data_length) {
+ if (data == NULL || data_length == 0) {
+ return false;
+ }
+
+  // Reject lengths that cannot be represented by size_t (possible on 32-bit
+  // targets, where size_t is narrower than uint64_t).
+  if (data_length != static_cast<size_t>(data_length)) {
+    return false;
+  }
+
+ uint8_t* new_private_data =
+ new (std::nothrow) uint8_t[static_cast<size_t>(data_length)];
+ if (new_private_data == NULL) {
+ return false;
+ }
+
+ delete[] private_data_;
+ private_data_ = new_private_data;
+ private_data_length_ = data_length;
+ memcpy(private_data_, data, static_cast<size_t>(data_length));
+
+ return true;
+}
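+
+// Note (annotation): the new buffer is allocated and checked before the old
+// |private_data_| is released, so a failed allocation leaves the previous
+// projection data intact.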
+
+uint64_t Projection::PayloadSize() const {
+  // Write() emits kMkvProjectionType for |type_|, so size it with that ID.
+  uint64_t size =
+      EbmlElementSize(libwebm::kMkvProjectionType, static_cast<uint64>(type_));
+
+ if (private_data_length_ > 0 && private_data_ != NULL) {
+ size += EbmlElementSize(libwebm::kMkvProjectionPrivate, private_data_,
+ private_data_length_);
+ }
+
+ size += EbmlElementSize(libwebm::kMkvProjectionPoseYaw, pose_yaw_);
+ size += EbmlElementSize(libwebm::kMkvProjectionPosePitch, pose_pitch_);
+ size += EbmlElementSize(libwebm::kMkvProjectionPoseRoll, pose_roll_);
+
+ return size;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// VideoTrack Class
+
+VideoTrack::VideoTrack(unsigned int* seed)
+ : Track(seed),
+ display_height_(0),
+ display_width_(0),
+ pixel_height_(0),
+ pixel_width_(0),
+ crop_left_(0),
+ crop_right_(0),
+ crop_top_(0),
+ crop_bottom_(0),
+ frame_rate_(0.0),
+ height_(0),
+ stereo_mode_(0),
+ alpha_mode_(0),
+ width_(0),
+ colour_(NULL),
+ projection_(NULL) {}
+
+VideoTrack::~VideoTrack() {
+ delete colour_;
+ delete projection_;
+}
+
+bool VideoTrack::SetStereoMode(uint64_t stereo_mode) {
+ if (stereo_mode != kMono && stereo_mode != kSideBySideLeftIsFirst &&
+ stereo_mode != kTopBottomRightIsFirst &&
+ stereo_mode != kTopBottomLeftIsFirst &&
+ stereo_mode != kSideBySideRightIsFirst)
+ return false;
+
+ stereo_mode_ = stereo_mode;
+ return true;
+}
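+
+// Example (illustrative only): SetStereoMode(kSideBySideLeftIsFirst)
+// succeeds, whereas an out-of-range value such as SetStereoMode(42) is
+// rejected and leaves |stereo_mode_| unchanged.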
+
+bool VideoTrack::SetAlphaMode(uint64_t alpha_mode) {
+ if (alpha_mode != kNoAlpha && alpha_mode != kAlpha)
+ return false;
+
+ alpha_mode_ = alpha_mode;
+ return true;
+}
+
+uint64_t VideoTrack::PayloadSize() const {
+ const uint64_t parent_size = Track::PayloadSize();
+
+ uint64_t size = VideoPayloadSize();
+ size += EbmlMasterElementSize(libwebm::kMkvVideo, size);
+
+ return parent_size + size;
+}
+
+bool VideoTrack::Write(IMkvWriter* writer) const {
+ if (!Track::Write(writer))
+ return false;
+
+ const uint64_t size = VideoPayloadSize();
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvVideo, size))
+ return false;
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ if (!WriteEbmlElement(
+ writer, libwebm::kMkvPixelWidth,
+ static_cast<uint64>((pixel_width_ > 0) ? pixel_width_ : width_)))
+ return false;
+ if (!WriteEbmlElement(
+ writer, libwebm::kMkvPixelHeight,
+ static_cast<uint64>((pixel_height_ > 0) ? pixel_height_ : height_)))
+ return false;
+ if (display_width_ > 0) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvDisplayWidth,
+ static_cast<uint64>(display_width_)))
+ return false;
+ }
+ if (display_height_ > 0) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvDisplayHeight,
+ static_cast<uint64>(display_height_)))
+ return false;
+ }
+ if (crop_left_ > 0) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvPixelCropLeft,
+ static_cast<uint64>(crop_left_)))
+ return false;
+ }
+ if (crop_right_ > 0) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvPixelCropRight,
+ static_cast<uint64>(crop_right_)))
+ return false;
+ }
+ if (crop_top_ > 0) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvPixelCropTop,
+ static_cast<uint64>(crop_top_)))
+ return false;
+ }
+ if (crop_bottom_ > 0) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvPixelCropBottom,
+ static_cast<uint64>(crop_bottom_)))
+ return false;
+ }
+ if (stereo_mode_ > kMono) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvStereoMode,
+ static_cast<uint64>(stereo_mode_)))
+ return false;
+ }
+ if (alpha_mode_ > kNoAlpha) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvAlphaMode,
+ static_cast<uint64>(alpha_mode_)))
+ return false;
+ }
+ if (frame_rate_ > 0.0) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvFrameRate,
+ static_cast<float>(frame_rate_))) {
+ return false;
+ }
+ }
+ if (colour_) {
+ if (!colour_->Write(writer))
+ return false;
+ }
+ if (projection_) {
+ if (!projection_->Write(writer))
+ return false;
+ }
+
+ const int64_t stop_position = writer->Position();
+ if (stop_position < 0 ||
+ stop_position - payload_position != static_cast<int64_t>(size)) {
+ return false;
+ }
+
+ return true;
+}
+
+bool VideoTrack::SetColour(const Colour& colour) {
+ std::unique_ptr<Colour> colour_ptr(new Colour());
+ if (!colour_ptr.get())
+ return false;
+
+ if (colour.mastering_metadata()) {
+ if (!colour_ptr->SetMasteringMetadata(*colour.mastering_metadata()))
+ return false;
+ }
+
+ colour_ptr->set_matrix_coefficients(colour.matrix_coefficients());
+ colour_ptr->set_bits_per_channel(colour.bits_per_channel());
+ colour_ptr->set_chroma_subsampling_horz(colour.chroma_subsampling_horz());
+ colour_ptr->set_chroma_subsampling_vert(colour.chroma_subsampling_vert());
+ colour_ptr->set_cb_subsampling_horz(colour.cb_subsampling_horz());
+ colour_ptr->set_cb_subsampling_vert(colour.cb_subsampling_vert());
+ colour_ptr->set_chroma_siting_horz(colour.chroma_siting_horz());
+ colour_ptr->set_chroma_siting_vert(colour.chroma_siting_vert());
+ colour_ptr->set_range(colour.range());
+ colour_ptr->set_transfer_characteristics(colour.transfer_characteristics());
+ colour_ptr->set_primaries(colour.primaries());
+ colour_ptr->set_max_cll(colour.max_cll());
+ colour_ptr->set_max_fall(colour.max_fall());
+ delete colour_;
+ colour_ = colour_ptr.release();
+ return true;
+}
+
+bool VideoTrack::SetProjection(const Projection& projection) {
+ std::unique_ptr<Projection> projection_ptr(new Projection());
+ if (!projection_ptr.get())
+ return false;
+
+ if (projection.private_data()) {
+ if (!projection_ptr->SetProjectionPrivate(
+ projection.private_data(), projection.private_data_length())) {
+ return false;
+ }
+ }
+
+ projection_ptr->set_type(projection.type());
+ projection_ptr->set_pose_yaw(projection.pose_yaw());
+ projection_ptr->set_pose_pitch(projection.pose_pitch());
+ projection_ptr->set_pose_roll(projection.pose_roll());
+ delete projection_;
+ projection_ = projection_ptr.release();
+ return true;
+}
+
+uint64_t VideoTrack::VideoPayloadSize() const {
+ uint64_t size = EbmlElementSize(
+ libwebm::kMkvPixelWidth,
+ static_cast<uint64>((pixel_width_ > 0) ? pixel_width_ : width_));
+ size += EbmlElementSize(
+ libwebm::kMkvPixelHeight,
+ static_cast<uint64>((pixel_height_ > 0) ? pixel_height_ : height_));
+ if (display_width_ > 0)
+ size += EbmlElementSize(libwebm::kMkvDisplayWidth,
+ static_cast<uint64>(display_width_));
+ if (display_height_ > 0)
+ size += EbmlElementSize(libwebm::kMkvDisplayHeight,
+ static_cast<uint64>(display_height_));
+ if (crop_left_ > 0)
+ size += EbmlElementSize(libwebm::kMkvPixelCropLeft,
+ static_cast<uint64>(crop_left_));
+ if (crop_right_ > 0)
+ size += EbmlElementSize(libwebm::kMkvPixelCropRight,
+ static_cast<uint64>(crop_right_));
+ if (crop_top_ > 0)
+ size += EbmlElementSize(libwebm::kMkvPixelCropTop,
+ static_cast<uint64>(crop_top_));
+ if (crop_bottom_ > 0)
+ size += EbmlElementSize(libwebm::kMkvPixelCropBottom,
+ static_cast<uint64>(crop_bottom_));
+ if (stereo_mode_ > kMono)
+ size += EbmlElementSize(libwebm::kMkvStereoMode,
+ static_cast<uint64>(stereo_mode_));
+ if (alpha_mode_ > kNoAlpha)
+ size += EbmlElementSize(libwebm::kMkvAlphaMode,
+ static_cast<uint64>(alpha_mode_));
+ if (frame_rate_ > 0.0)
+ size += EbmlElementSize(libwebm::kMkvFrameRate,
+ static_cast<float>(frame_rate_));
+ if (colour_)
+ size += colour_->ColourSize();
+ if (projection_)
+ size += projection_->ProjectionSize();
+
+ return size;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// AudioTrack Class
+
+AudioTrack::AudioTrack(unsigned int* seed)
+ : Track(seed), bit_depth_(0), channels_(1), sample_rate_(0.0) {}
+
+AudioTrack::~AudioTrack() {}
+
+uint64_t AudioTrack::PayloadSize() const {
+ const uint64_t parent_size = Track::PayloadSize();
+
+ uint64_t size = EbmlElementSize(libwebm::kMkvSamplingFrequency,
+ static_cast<float>(sample_rate_));
+ size +=
+ EbmlElementSize(libwebm::kMkvChannels, static_cast<uint64>(channels_));
+ if (bit_depth_ > 0)
+ size +=
+ EbmlElementSize(libwebm::kMkvBitDepth, static_cast<uint64>(bit_depth_));
+ size += EbmlMasterElementSize(libwebm::kMkvAudio, size);
+
+ return parent_size + size;
+}
+
+bool AudioTrack::Write(IMkvWriter* writer) const {
+ if (!Track::Write(writer))
+ return false;
+
+ // Calculate AudioSettings size.
+ uint64_t size = EbmlElementSize(libwebm::kMkvSamplingFrequency,
+ static_cast<float>(sample_rate_));
+ size +=
+ EbmlElementSize(libwebm::kMkvChannels, static_cast<uint64>(channels_));
+ if (bit_depth_ > 0)
+ size +=
+ EbmlElementSize(libwebm::kMkvBitDepth, static_cast<uint64>(bit_depth_));
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvAudio, size))
+ return false;
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvSamplingFrequency,
+ static_cast<float>(sample_rate_)))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvChannels,
+ static_cast<uint64>(channels_)))
+ return false;
+  if (bit_depth_ > 0) {
+    if (!WriteEbmlElement(writer, libwebm::kMkvBitDepth,
+                          static_cast<uint64>(bit_depth_)))
+      return false;
+  }
+
+ const int64_t stop_position = writer->Position();
+ if (stop_position < 0 ||
+ stop_position - payload_position != static_cast<int64_t>(size))
+ return false;
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Tracks Class
+
+const char Tracks::kOpusCodecId[] = "A_OPUS";
+const char Tracks::kVorbisCodecId[] = "A_VORBIS";
+const char Tracks::kVp8CodecId[] = "V_VP8";
+const char Tracks::kVp9CodecId[] = "V_VP9";
+const char Tracks::kVp10CodecId[] = "V_VP10";
+const char Tracks::kAV1CodecId[] = "V_AV1";
+const char Tracks::kWebVttCaptionsId[] = "D_WEBVTT/CAPTIONS";
+const char Tracks::kWebVttDescriptionsId[] = "D_WEBVTT/DESCRIPTIONS";
+const char Tracks::kWebVttMetadataId[] = "D_WEBVTT/METADATA";
+const char Tracks::kWebVttSubtitlesId[] = "D_WEBVTT/SUBTITLES";
+
+Tracks::Tracks()
+ : track_entries_(NULL), track_entries_size_(0), wrote_tracks_(false) {}
+
+Tracks::~Tracks() {
+ if (track_entries_) {
+ for (uint32_t i = 0; i < track_entries_size_; ++i) {
+ Track* const track = track_entries_[i];
+ delete track;
+ }
+ delete[] track_entries_;
+ }
+}
+
+bool Tracks::AddTrack(Track* track, int32_t number) {
+ if (number < 0 || wrote_tracks_)
+ return false;
+
+  // This muxer only supports track numbers in the range [1, 126]: with
+  // Matroska's variable-length integer representation, that guarantees the
+  // block header (of which the track number is a part) can be serialized
+  // for a frame using exactly 4 bytes.
+
+ if (number > 0x7E)
+ return false;
+
+ uint32_t track_num = number;
+
+ if (track_num > 0) {
+ // Check to make sure a track does not already have |track_num|.
+ for (uint32_t i = 0; i < track_entries_size_; ++i) {
+ if (track_entries_[i]->number() == track_num)
+ return false;
+ }
+ }
+
+ const uint32_t count = track_entries_size_ + 1;
+
+ Track** const track_entries = new (std::nothrow) Track*[count]; // NOLINT
+ if (!track_entries)
+ return false;
+
+ for (uint32_t i = 0; i < track_entries_size_; ++i) {
+ track_entries[i] = track_entries_[i];
+ }
+
+ delete[] track_entries_;
+
+  // Find a free track number > 0, starting the search from |count|.
+ if (track_num == 0) {
+ track_num = count;
+
+ // Check to make sure a track does not already have |track_num|.
+ bool exit = false;
+ do {
+ exit = true;
+ for (uint32_t i = 0; i < track_entries_size_; ++i) {
+ if (track_entries[i]->number() == track_num) {
+ track_num++;
+ exit = false;
+ break;
+ }
+ }
+ } while (!exit);
+ }
+ track->set_number(track_num);
+
+ track_entries_ = track_entries;
+ track_entries_[track_entries_size_] = track;
+ track_entries_size_ = count;
+ return true;
+}
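+
+// Usage sketch (illustrative only; |seed| and the track are hypothetical):
+// passing 0 as |number| asks AddTrack() to pick a free track number itself:
+//
+//   unsigned int seed = 0;
+//   Track* const track = new (std::nothrow) VideoTrack(&seed);
+//   if (track && !tracks.AddTrack(track, 0))  // 0 => auto-assign from [1, 126]
+//     delete track;  // ownership was not transferred on failure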
+
+const Track* Tracks::GetTrackByIndex(uint32_t index) const {
+ if (track_entries_ == NULL)
+ return NULL;
+
+ if (index >= track_entries_size_)
+ return NULL;
+
+ return track_entries_[index];
+}
+
+Track* Tracks::GetTrackByNumber(uint64_t track_number) const {
+ const int32_t count = track_entries_size();
+ for (int32_t i = 0; i < count; ++i) {
+ if (track_entries_[i]->number() == track_number)
+ return track_entries_[i];
+ }
+
+ return NULL;
+}
+
+bool Tracks::TrackIsAudio(uint64_t track_number) const {
+  const Track* const track = GetTrackByNumber(track_number);
+
+  // GetTrackByNumber() returns NULL for unknown track numbers.
+  return track != NULL && track->type() == kAudio;
+}
+
+bool Tracks::TrackIsVideo(uint64_t track_number) const {
+  const Track* const track = GetTrackByNumber(track_number);
+
+  return track != NULL && track->type() == kVideo;
+}
+
+bool Tracks::Write(IMkvWriter* writer) const {
+ uint64_t size = 0;
+ const int32_t count = track_entries_size();
+ for (int32_t i = 0; i < count; ++i) {
+ const Track* const track = GetTrackByIndex(i);
+
+ if (!track)
+ return false;
+
+ size += track->Size();
+ }
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvTracks, size))
+ return false;
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ for (int32_t i = 0; i < count; ++i) {
+ const Track* const track = GetTrackByIndex(i);
+ if (!track->Write(writer))
+ return false;
+ }
+
+ const int64_t stop_position = writer->Position();
+ if (stop_position < 0 ||
+ stop_position - payload_position != static_cast<int64_t>(size))
+ return false;
+
+ wrote_tracks_ = true;
+ return true;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Chapter Class
+
+bool Chapter::set_id(const char* id) { return StrCpy(id, &id_); }
+
+void Chapter::set_time(const Segment& segment, uint64_t start_ns,
+ uint64_t end_ns) {
+ const SegmentInfo* const info = segment.GetSegmentInfo();
+ const uint64_t timecode_scale = info->timecode_scale();
+ start_timecode_ = start_ns / timecode_scale;
+ end_timecode_ = end_ns / timecode_scale;
+}
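+
+// Example (annotation): with the default Matroska timecode scale of
+// 1,000,000 ns, set_time(segment, 5000000000, 10000000000) stores the start
+// timecode 5000 and the end timecode 10000.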
+
+bool Chapter::add_string(const char* title, const char* language,
+ const char* country) {
+ if (!ExpandDisplaysArray())
+ return false;
+
+ Display& d = displays_[displays_count_++];
+ d.Init();
+
+ if (!d.set_title(title))
+ return false;
+
+ if (!d.set_language(language))
+ return false;
+
+ if (!d.set_country(country))
+ return false;
+
+ return true;
+}
+
+Chapter::Chapter() {
+  // This ctor only constructs the object; proper initialization is done in
+  // Init() (called from Chapters::AddChapter()). The only reason we bother
+  // implementing this ctor is that we had to declare it private (along with
+  // the dtor) to prevent clients from creating Chapter instances, a
+  // privilege we grant only to the Chapters class. Doing no initialization
+  // here also makes creating arrays of chapter objects cheaper, because each
+  // new chapter object is initialized only as it becomes active in the
+  // array.
+}
+
+Chapter::~Chapter() {}
+
+void Chapter::Init(unsigned int* seed) {
+ id_ = NULL;
+ start_timecode_ = 0;
+ end_timecode_ = 0;
+ displays_ = NULL;
+ displays_size_ = 0;
+ displays_count_ = 0;
+ uid_ = MakeUID(seed);
+}
+
+void Chapter::ShallowCopy(Chapter* dst) const {
+ dst->id_ = id_;
+ dst->start_timecode_ = start_timecode_;
+ dst->end_timecode_ = end_timecode_;
+ dst->uid_ = uid_;
+ dst->displays_ = displays_;
+ dst->displays_size_ = displays_size_;
+ dst->displays_count_ = displays_count_;
+}
+
+void Chapter::Clear() {
+ StrCpy(NULL, &id_);
+
+ while (displays_count_ > 0) {
+ Display& d = displays_[--displays_count_];
+ d.Clear();
+ }
+
+ delete[] displays_;
+ displays_ = NULL;
+
+ displays_size_ = 0;
+}
+
+bool Chapter::ExpandDisplaysArray() {
+ if (displays_size_ > displays_count_)
+ return true; // nothing to do yet
+
+ const int size = (displays_size_ == 0) ? 1 : 2 * displays_size_;
+
+ Display* const displays = new (std::nothrow) Display[size]; // NOLINT
+ if (displays == NULL)
+ return false;
+
+ for (int idx = 0; idx < displays_count_; ++idx) {
+ displays[idx] = displays_[idx]; // shallow copy
+ }
+
+ delete[] displays_;
+
+ displays_ = displays;
+ displays_size_ = size;
+
+ return true;
+}
+
+uint64_t Chapter::WriteAtom(IMkvWriter* writer) const {
+ uint64_t payload_size =
+ EbmlElementSize(libwebm::kMkvChapterStringUID, id_) +
+ EbmlElementSize(libwebm::kMkvChapterUID, static_cast<uint64>(uid_)) +
+ EbmlElementSize(libwebm::kMkvChapterTimeStart,
+ static_cast<uint64>(start_timecode_)) +
+ EbmlElementSize(libwebm::kMkvChapterTimeEnd,
+ static_cast<uint64>(end_timecode_));
+
+ for (int idx = 0; idx < displays_count_; ++idx) {
+ const Display& d = displays_[idx];
+ payload_size += d.WriteDisplay(NULL);
+ }
+
+ const uint64_t atom_size =
+ EbmlMasterElementSize(libwebm::kMkvChapterAtom, payload_size) +
+ payload_size;
+
+ if (writer == NULL)
+ return atom_size;
+
+ const int64_t start = writer->Position();
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvChapterAtom, payload_size))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvChapterStringUID, id_))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvChapterUID,
+ static_cast<uint64>(uid_)))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvChapterTimeStart,
+ static_cast<uint64>(start_timecode_)))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvChapterTimeEnd,
+ static_cast<uint64>(end_timecode_)))
+ return 0;
+
+ for (int idx = 0; idx < displays_count_; ++idx) {
+ const Display& d = displays_[idx];
+
+ if (!d.WriteDisplay(writer))
+ return 0;
+ }
+
+ const int64_t stop = writer->Position();
+
+ if (stop >= start && uint64_t(stop - start) != atom_size)
+ return 0;
+
+ return atom_size;
+}
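+
+// Note (annotation): WriteAtom() doubles as a size query; a NULL writer
+// returns the encoded size without emitting any bytes, which is how
+// Chapters::WriteEdition() below computes its payload size before writing.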
+
+void Chapter::Display::Init() {
+ title_ = NULL;
+ language_ = NULL;
+ country_ = NULL;
+}
+
+void Chapter::Display::Clear() {
+ StrCpy(NULL, &title_);
+ StrCpy(NULL, &language_);
+ StrCpy(NULL, &country_);
+}
+
+bool Chapter::Display::set_title(const char* title) {
+ return StrCpy(title, &title_);
+}
+
+bool Chapter::Display::set_language(const char* language) {
+ return StrCpy(language, &language_);
+}
+
+bool Chapter::Display::set_country(const char* country) {
+ return StrCpy(country, &country_);
+}
+
+uint64_t Chapter::Display::WriteDisplay(IMkvWriter* writer) const {
+ uint64_t payload_size = EbmlElementSize(libwebm::kMkvChapString, title_);
+
+ if (language_)
+ payload_size += EbmlElementSize(libwebm::kMkvChapLanguage, language_);
+
+ if (country_)
+ payload_size += EbmlElementSize(libwebm::kMkvChapCountry, country_);
+
+ const uint64_t display_size =
+ EbmlMasterElementSize(libwebm::kMkvChapterDisplay, payload_size) +
+ payload_size;
+
+ if (writer == NULL)
+ return display_size;
+
+ const int64_t start = writer->Position();
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvChapterDisplay,
+ payload_size))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvChapString, title_))
+ return 0;
+
+ if (language_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvChapLanguage, language_))
+ return 0;
+ }
+
+ if (country_) {
+ if (!WriteEbmlElement(writer, libwebm::kMkvChapCountry, country_))
+ return 0;
+ }
+
+ const int64_t stop = writer->Position();
+
+ if (stop >= start && uint64_t(stop - start) != display_size)
+ return 0;
+
+ return display_size;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Chapters Class
+
+Chapters::Chapters() : chapters_size_(0), chapters_count_(0), chapters_(NULL) {}
+
+Chapters::~Chapters() {
+ while (chapters_count_ > 0) {
+ Chapter& chapter = chapters_[--chapters_count_];
+ chapter.Clear();
+ }
+
+ delete[] chapters_;
+ chapters_ = NULL;
+}
+
+int Chapters::Count() const { return chapters_count_; }
+
+Chapter* Chapters::AddChapter(unsigned int* seed) {
+ if (!ExpandChaptersArray())
+ return NULL;
+
+ Chapter& chapter = chapters_[chapters_count_++];
+ chapter.Init(seed);
+
+ return &chapter;
+}
+
+bool Chapters::Write(IMkvWriter* writer) const {
+ if (writer == NULL)
+ return false;
+
+ const uint64_t payload_size = WriteEdition(NULL); // return size only
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvChapters, payload_size))
+ return false;
+
+ const int64_t start = writer->Position();
+
+ if (WriteEdition(writer) == 0) // error
+ return false;
+
+ const int64_t stop = writer->Position();
+
+ if (stop >= start && uint64_t(stop - start) != payload_size)
+ return false;
+
+ return true;
+}
+
+bool Chapters::ExpandChaptersArray() {
+ if (chapters_size_ > chapters_count_)
+ return true; // nothing to do yet
+
+ const int size = (chapters_size_ == 0) ? 1 : 2 * chapters_size_;
+
+ Chapter* const chapters = new (std::nothrow) Chapter[size]; // NOLINT
+ if (chapters == NULL)
+ return false;
+
+ for (int idx = 0; idx < chapters_count_; ++idx) {
+ const Chapter& src = chapters_[idx];
+ Chapter* const dst = chapters + idx;
+ src.ShallowCopy(dst);
+ }
+
+ delete[] chapters_;
+
+ chapters_ = chapters;
+ chapters_size_ = size;
+
+ return true;
+}
+
+uint64_t Chapters::WriteEdition(IMkvWriter* writer) const {
+ uint64_t payload_size = 0;
+
+ for (int idx = 0; idx < chapters_count_; ++idx) {
+ const Chapter& chapter = chapters_[idx];
+ payload_size += chapter.WriteAtom(NULL);
+ }
+
+ const uint64_t edition_size =
+ EbmlMasterElementSize(libwebm::kMkvEditionEntry, payload_size) +
+ payload_size;
+
+ if (writer == NULL) // return size only
+ return edition_size;
+
+ const int64_t start = writer->Position();
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvEditionEntry, payload_size))
+ return 0; // error
+
+ for (int idx = 0; idx < chapters_count_; ++idx) {
+ const Chapter& chapter = chapters_[idx];
+
+ const uint64_t chapter_size = chapter.WriteAtom(writer);
+ if (chapter_size == 0) // error
+ return 0;
+ }
+
+ const int64_t stop = writer->Position();
+
+ if (stop >= start && uint64_t(stop - start) != edition_size)
+ return 0;
+
+ return edition_size;
+}
+
+// Tag Class
+
+bool Tag::add_simple_tag(const char* tag_name, const char* tag_string) {
+ if (!ExpandSimpleTagsArray())
+ return false;
+
+ SimpleTag& st = simple_tags_[simple_tags_count_++];
+ st.Init();
+
+ if (!st.set_tag_name(tag_name))
+ return false;
+
+ if (!st.set_tag_string(tag_string))
+ return false;
+
+ return true;
+}
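+
+// Usage sketch (illustrative only; the tag name and string are example
+// values):
+//
+//   Tag* const tag = tags.AddTag();
+//   if (!tag || !tag->add_simple_tag("TITLE", "An example title"))
+//     return false;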
+
+Tag::Tag() {
+ simple_tags_ = NULL;
+ simple_tags_size_ = 0;
+ simple_tags_count_ = 0;
+}
+
+Tag::~Tag() {}
+
+void Tag::ShallowCopy(Tag* dst) const {
+ dst->simple_tags_ = simple_tags_;
+ dst->simple_tags_size_ = simple_tags_size_;
+ dst->simple_tags_count_ = simple_tags_count_;
+}
+
+void Tag::Clear() {
+ while (simple_tags_count_ > 0) {
+ SimpleTag& st = simple_tags_[--simple_tags_count_];
+ st.Clear();
+ }
+
+ delete[] simple_tags_;
+ simple_tags_ = NULL;
+
+ simple_tags_size_ = 0;
+}
+
+bool Tag::ExpandSimpleTagsArray() {
+ if (simple_tags_size_ > simple_tags_count_)
+ return true; // nothing to do yet
+
+ const int size = (simple_tags_size_ == 0) ? 1 : 2 * simple_tags_size_;
+
+ SimpleTag* const simple_tags = new (std::nothrow) SimpleTag[size]; // NOLINT
+ if (simple_tags == NULL)
+ return false;
+
+ for (int idx = 0; idx < simple_tags_count_; ++idx) {
+ simple_tags[idx] = simple_tags_[idx]; // shallow copy
+ }
+
+ delete[] simple_tags_;
+
+ simple_tags_ = simple_tags;
+ simple_tags_size_ = size;
+
+ return true;
+}
+
+uint64_t Tag::Write(IMkvWriter* writer) const {
+ uint64_t payload_size = 0;
+
+ for (int idx = 0; idx < simple_tags_count_; ++idx) {
+ const SimpleTag& st = simple_tags_[idx];
+ payload_size += st.Write(NULL);
+ }
+
+ const uint64_t tag_size =
+ EbmlMasterElementSize(libwebm::kMkvTag, payload_size) + payload_size;
+
+ if (writer == NULL)
+ return tag_size;
+
+ const int64_t start = writer->Position();
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvTag, payload_size))
+ return 0;
+
+ for (int idx = 0; idx < simple_tags_count_; ++idx) {
+ const SimpleTag& st = simple_tags_[idx];
+
+ if (!st.Write(writer))
+ return 0;
+ }
+
+ const int64_t stop = writer->Position();
+
+ if (stop >= start && uint64_t(stop - start) != tag_size)
+ return 0;
+
+ return tag_size;
+}
+
+// Tag::SimpleTag
+
+void Tag::SimpleTag::Init() {
+ tag_name_ = NULL;
+ tag_string_ = NULL;
+}
+
+void Tag::SimpleTag::Clear() {
+ StrCpy(NULL, &tag_name_);
+ StrCpy(NULL, &tag_string_);
+}
+
+bool Tag::SimpleTag::set_tag_name(const char* tag_name) {
+ return StrCpy(tag_name, &tag_name_);
+}
+
+bool Tag::SimpleTag::set_tag_string(const char* tag_string) {
+ return StrCpy(tag_string, &tag_string_);
+}
+
+uint64_t Tag::SimpleTag::Write(IMkvWriter* writer) const {
+ uint64_t payload_size = EbmlElementSize(libwebm::kMkvTagName, tag_name_);
+
+ payload_size += EbmlElementSize(libwebm::kMkvTagString, tag_string_);
+
+ const uint64_t simple_tag_size =
+ EbmlMasterElementSize(libwebm::kMkvSimpleTag, payload_size) +
+ payload_size;
+
+ if (writer == NULL)
+ return simple_tag_size;
+
+ const int64_t start = writer->Position();
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvSimpleTag, payload_size))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvTagName, tag_name_))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvTagString, tag_string_))
+ return 0;
+
+ const int64_t stop = writer->Position();
+
+ if (stop >= start && uint64_t(stop - start) != simple_tag_size)
+ return 0;
+
+ return simple_tag_size;
+}
+
+// Tags Class
+
+Tags::Tags() : tags_size_(0), tags_count_(0), tags_(NULL) {}
+
+Tags::~Tags() {
+ while (tags_count_ > 0) {
+ Tag& tag = tags_[--tags_count_];
+ tag.Clear();
+ }
+
+ delete[] tags_;
+ tags_ = NULL;
+}
+
+int Tags::Count() const { return tags_count_; }
+
+Tag* Tags::AddTag() {
+ if (!ExpandTagsArray())
+ return NULL;
+
+ Tag& tag = tags_[tags_count_++];
+
+ return &tag;
+}
+
+bool Tags::Write(IMkvWriter* writer) const {
+ if (writer == NULL)
+ return false;
+
+ uint64_t payload_size = 0;
+
+ for (int idx = 0; idx < tags_count_; ++idx) {
+ const Tag& tag = tags_[idx];
+ payload_size += tag.Write(NULL);
+ }
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvTags, payload_size))
+ return false;
+
+ const int64_t start = writer->Position();
+
+ for (int idx = 0; idx < tags_count_; ++idx) {
+ const Tag& tag = tags_[idx];
+
+ const uint64_t tag_size = tag.Write(writer);
+    if (tag_size == 0)  // error
+      return false;
+ }
+
+ const int64_t stop = writer->Position();
+
+ if (stop >= start && uint64_t(stop - start) != payload_size)
+ return false;
+
+ return true;
+}
+
+bool Tags::ExpandTagsArray() {
+ if (tags_size_ > tags_count_)
+ return true; // nothing to do yet
+
+ const int size = (tags_size_ == 0) ? 1 : 2 * tags_size_;
+
+ Tag* const tags = new (std::nothrow) Tag[size]; // NOLINT
+ if (tags == NULL)
+ return false;
+
+ for (int idx = 0; idx < tags_count_; ++idx) {
+ const Tag& src = tags_[idx];
+ Tag* const dst = tags + idx;
+ src.ShallowCopy(dst);
+ }
+
+ delete[] tags_;
+
+ tags_ = tags;
+ tags_size_ = size;
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Cluster class
+
+Cluster::Cluster(uint64_t timecode, int64_t cues_pos, uint64_t timecode_scale,
+ bool write_last_frame_with_duration, bool fixed_size_timecode)
+ : blocks_added_(0),
+ finalized_(false),
+ fixed_size_timecode_(fixed_size_timecode),
+ header_written_(false),
+ payload_size_(0),
+ position_for_cues_(cues_pos),
+ size_position_(-1),
+ timecode_(timecode),
+ timecode_scale_(timecode_scale),
+ write_last_frame_with_duration_(write_last_frame_with_duration),
+ writer_(NULL) {}
+
+Cluster::~Cluster() {
+ // Delete any stored frames that are left behind. This will happen if the
+ // Cluster was not Finalized for whatever reason.
+ while (!stored_frames_.empty()) {
+ while (!stored_frames_.begin()->second.empty()) {
+ delete stored_frames_.begin()->second.front();
+ stored_frames_.begin()->second.pop_front();
+ }
+ stored_frames_.erase(stored_frames_.begin()->first);
+ }
+}
+
+bool Cluster::Init(IMkvWriter* ptr_writer) {
+ if (!ptr_writer) {
+ return false;
+ }
+ writer_ = ptr_writer;
+ return true;
+}
+
+bool Cluster::AddFrame(const Frame* const frame) {
+ return QueueOrWriteFrame(frame);
+}
+
+bool Cluster::AddFrame(const uint8_t* data, uint64_t length,
+ uint64_t track_number, uint64_t abs_timecode,
+ bool is_key) {
+ Frame frame;
+ if (!frame.Init(data, length))
+ return false;
+ frame.set_track_number(track_number);
+ frame.set_timestamp(abs_timecode);
+ frame.set_is_key(is_key);
+ return QueueOrWriteFrame(&frame);
+}
+
+bool Cluster::AddFrameWithAdditional(const uint8_t* data, uint64_t length,
+ const uint8_t* additional,
+ uint64_t additional_length,
+ uint64_t add_id, uint64_t track_number,
+ uint64_t abs_timecode, bool is_key) {
+ if (!additional || additional_length == 0) {
+ return false;
+ }
+ Frame frame;
+ if (!frame.Init(data, length) ||
+ !frame.AddAdditionalData(additional, additional_length, add_id)) {
+ return false;
+ }
+ frame.set_track_number(track_number);
+ frame.set_timestamp(abs_timecode);
+ frame.set_is_key(is_key);
+ return QueueOrWriteFrame(&frame);
+}
+
+bool Cluster::AddFrameWithDiscardPadding(const uint8_t* data, uint64_t length,
+ int64_t discard_padding,
+ uint64_t track_number,
+ uint64_t abs_timecode, bool is_key) {
+ Frame frame;
+ if (!frame.Init(data, length))
+ return false;
+ frame.set_discard_padding(discard_padding);
+ frame.set_track_number(track_number);
+ frame.set_timestamp(abs_timecode);
+ frame.set_is_key(is_key);
+ return QueueOrWriteFrame(&frame);
+}
+
+bool Cluster::AddMetadata(const uint8_t* data, uint64_t length,
+ uint64_t track_number, uint64_t abs_timecode,
+ uint64_t duration_timecode) {
+ Frame frame;
+ if (!frame.Init(data, length))
+ return false;
+ frame.set_track_number(track_number);
+ frame.set_timestamp(abs_timecode);
+ frame.set_duration(duration_timecode);
+ frame.set_is_key(true); // All metadata blocks are keyframes.
+ return QueueOrWriteFrame(&frame);
+}
+
+void Cluster::AddPayloadSize(uint64_t size) { payload_size_ += size; }
+
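+// This variant finalizes without writing a duration on the last frame; it is
+// only valid when frames are not being held back for duration computation.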
+bool Cluster::Finalize() {
+ return !write_last_frame_with_duration_ && Finalize(false, 0);
+}
+
+bool Cluster::Finalize(bool set_last_frame_duration, uint64_t duration) {
+ if (!writer_ || finalized_)
+ return false;
+
+ if (write_last_frame_with_duration_) {
+ // Write out held back Frames. This essentially performs a k-way merge
+ // across all tracks in the increasing order of timestamps.
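+    // For example, with queued frames {track 1: 10, 30} and {track 2: 20},
+    // the frames are written in timestamp order: 10, 20, 30.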
+ while (!stored_frames_.empty()) {
+ Frame* frame = stored_frames_.begin()->second.front();
+
+ // Get the next frame to write (frame with least timestamp across all
+ // tracks).
+ for (FrameMapIterator frames_iterator = ++stored_frames_.begin();
+ frames_iterator != stored_frames_.end(); ++frames_iterator) {
+ if (frames_iterator->second.front()->timestamp() < frame->timestamp()) {
+ frame = frames_iterator->second.front();
+ }
+ }
+
+ // Set the duration if it's the last frame for the track.
+ if (set_last_frame_duration &&
+ stored_frames_[frame->track_number()].size() == 1 &&
+ !frame->duration_set()) {
+ frame->set_duration(duration - frame->timestamp());
+ if (!frame->is_key() && !frame->reference_block_timestamp_set()) {
+ frame->set_reference_block_timestamp(
+ last_block_timestamp_[frame->track_number()]);
+ }
+ }
+
+ // Write the frame and remove it from |stored_frames_|.
+ const bool wrote_frame = DoWriteFrame(frame);
+ stored_frames_[frame->track_number()].pop_front();
+ if (stored_frames_[frame->track_number()].empty()) {
+ stored_frames_.erase(frame->track_number());
+ }
+ delete frame;
+ if (!wrote_frame)
+ return false;
+ }
+ }
+
+ if (size_position_ == -1)
+ return false;
+
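+  // Seek back to the size field written by WriteClusterHeader(), replace the
+  // unknown size with the real payload size, and restore the write position.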
+ if (writer_->Seekable()) {
+ const int64_t pos = writer_->Position();
+
+ if (writer_->Position(size_position_))
+ return false;
+
+ if (WriteUIntSize(writer_, payload_size(), 8))
+ return false;
+
+ if (writer_->Position(pos))
+ return false;
+ }
+
+ finalized_ = true;
+
+ return true;
+}
+
+uint64_t Cluster::Size() const {
+ const uint64_t element_size =
+ EbmlMasterElementSize(libwebm::kMkvCluster, 0xFFFFFFFFFFFFFFFFULL) +
+ payload_size_;
+ return element_size;
+}
+
+bool Cluster::PreWriteBlock() {
+ if (finalized_)
+ return false;
+
+ if (!header_written_) {
+ if (!WriteClusterHeader())
+ return false;
+ }
+
+ return true;
+}
+
+void Cluster::PostWriteBlock(uint64_t element_size) {
+ AddPayloadSize(element_size);
+ ++blocks_added_;
+}
+
+int64_t Cluster::GetRelativeTimecode(int64_t abs_timecode) const {
+ const int64_t cluster_timecode = this->Cluster::timecode();
+ const int64_t rel_timecode =
+ static_cast<int64_t>(abs_timecode) - cluster_timecode;
+
+ if (rel_timecode < 0 || rel_timecode > kMaxBlockTimecode)
+ return -1;
+
+ return rel_timecode;
+}
+
+bool Cluster::DoWriteFrame(const Frame* const frame) {
+ if (!frame || !frame->IsValid())
+ return false;
+
+ if (!PreWriteBlock())
+ return false;
+
+ const uint64_t element_size = WriteFrame(writer_, frame, this);
+ if (element_size == 0)
+ return false;
+
+ PostWriteBlock(element_size);
+ last_block_timestamp_[frame->track_number()] = frame->timestamp();
+ return true;
+}
+
+bool Cluster::QueueOrWriteFrame(const Frame* const frame) {
+ if (!frame || !frame->IsValid())
+ return false;
+
+ // If |write_last_frame_with_duration_| is not set, then write the frame right
+ // away.
+ if (!write_last_frame_with_duration_) {
+ return DoWriteFrame(frame);
+ }
+
+ // Queue the current frame.
+ uint64_t track_number = frame->track_number();
+ Frame* const frame_to_store = new Frame();
+ frame_to_store->CopyFrom(*frame);
+ stored_frames_[track_number].push_back(frame_to_store);
+
+  // Iterate through all queued frames in the current track except the last
+  // one, writing each frame if it is okay to do so, i.e., if no other track
+  // holds a queued frame with an earlier timestamp than the frame in
+  // question.
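+  // For example, if this track queues frames at {10, 40} and another track
+  // still holds a frame at 20, the frame at 10 may be written now (no other
+  // track holds an earlier frame), while 40 stays queued.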
+ std::vector<std::list<Frame*>::iterator> frames_to_erase;
+ for (std::list<Frame*>::iterator
+ current_track_iterator = stored_frames_[track_number].begin(),
+ end = --stored_frames_[track_number].end();
+ current_track_iterator != end; ++current_track_iterator) {
+ const Frame* const frame_to_write = *current_track_iterator;
+ bool okay_to_write = true;
+ for (FrameMapIterator track_iterator = stored_frames_.begin();
+ track_iterator != stored_frames_.end(); ++track_iterator) {
+ if (track_iterator->first == track_number) {
+ continue;
+ }
+ if (track_iterator->second.front()->timestamp() <
+ frame_to_write->timestamp()) {
+ okay_to_write = false;
+ break;
+ }
+ }
+ if (okay_to_write) {
+ const bool wrote_frame = DoWriteFrame(frame_to_write);
+ delete frame_to_write;
+ if (!wrote_frame)
+ return false;
+ frames_to_erase.push_back(current_track_iterator);
+ } else {
+ break;
+ }
+ }
+ for (std::vector<std::list<Frame*>::iterator>::iterator iterator =
+ frames_to_erase.begin();
+ iterator != frames_to_erase.end(); ++iterator) {
+ stored_frames_[track_number].erase(*iterator);
+ }
+ return true;
+}
+
+bool Cluster::WriteClusterHeader() {
+ if (finalized_)
+ return false;
+
+ if (WriteID(writer_, libwebm::kMkvCluster))
+ return false;
+
+ // Save for later.
+ size_position_ = writer_->Position();
+
+ // Write "unknown" (EBML coded -1) as cluster size value. We need to write 8
+ // bytes because we do not know how big our cluster will be.
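+  // With an 8-byte length, this unknown size is serialized as
+  // 0x01FFFFFFFFFFFFFF: a 0x01 length descriptor followed by seven 0xFF
+  // bytes.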
+ if (SerializeInt(writer_, kEbmlUnknownValue, 8))
+ return false;
+
+ if (!WriteEbmlElement(writer_, libwebm::kMkvTimecode, timecode(),
+ fixed_size_timecode_ ? 8 : 0)) {
+ return false;
+ }
+ AddPayloadSize(EbmlElementSize(libwebm::kMkvTimecode, timecode(),
+ fixed_size_timecode_ ? 8 : 0));
+ header_written_ = true;
+
+ return true;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// SeekHead Class
+
+SeekHead::SeekHead() : start_pos_(0ULL) {
+ for (int32_t i = 0; i < kSeekEntryCount; ++i) {
+ seek_entry_id_[i] = 0;
+ seek_entry_pos_[i] = 0;
+ }
+}
+
+SeekHead::~SeekHead() {}
+
+bool SeekHead::Finalize(IMkvWriter* writer) const {
+ if (writer->Seekable()) {
+ if (start_pos_ == -1)
+ return false;
+
+ uint64_t payload_size = 0;
+ uint64_t entry_size[kSeekEntryCount];
+
+ for (int32_t i = 0; i < kSeekEntryCount; ++i) {
+ if (seek_entry_id_[i] != 0) {
+ entry_size[i] = EbmlElementSize(libwebm::kMkvSeekID,
+ static_cast<uint64>(seek_entry_id_[i]));
+ entry_size[i] += EbmlElementSize(
+ libwebm::kMkvSeekPosition, static_cast<uint64>(seek_entry_pos_[i]));
+
+ payload_size +=
+ EbmlMasterElementSize(libwebm::kMkvSeek, entry_size[i]) +
+ entry_size[i];
+ }
+ }
+
+ // No SeekHead elements
+ if (payload_size == 0)
+ return true;
+
+ const int64_t pos = writer->Position();
+ if (writer->Position(start_pos_))
+ return false;
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvSeekHead, payload_size))
+ return false;
+
+ for (int32_t i = 0; i < kSeekEntryCount; ++i) {
+ if (seek_entry_id_[i] != 0) {
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvSeek, entry_size[i]))
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvSeekID,
+ static_cast<uint64>(seek_entry_id_[i])))
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvSeekPosition,
+ static_cast<uint64>(seek_entry_pos_[i])))
+ return false;
+ }
+ }
+
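+    // Pad the remainder of the space reserved by Write() with a Void element
+    // so the offsets of everything written after it stay valid.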
+ const uint64_t total_entry_size = kSeekEntryCount * MaxEntrySize();
+ const uint64_t total_size =
+ EbmlMasterElementSize(libwebm::kMkvSeekHead, total_entry_size) +
+ total_entry_size;
+ const int64_t size_left = total_size - (writer->Position() - start_pos_);
+
+ const uint64_t bytes_written = WriteVoidElement(writer, size_left);
+ if (!bytes_written)
+ return false;
+
+ if (writer->Position(pos))
+ return false;
+ }
+
+ return true;
+}
+
+bool SeekHead::Write(IMkvWriter* writer) {
+ const uint64_t entry_size = kSeekEntryCount * MaxEntrySize();
+ const uint64_t size =
+ EbmlMasterElementSize(libwebm::kMkvSeekHead, entry_size);
+
+ start_pos_ = writer->Position();
+
+ const uint64_t bytes_written = WriteVoidElement(writer, size + entry_size);
+ if (!bytes_written)
+ return false;
+
+ return true;
+}
+
+bool SeekHead::AddSeekEntry(uint32_t id, uint64_t pos) {
+ for (int32_t i = 0; i < kSeekEntryCount; ++i) {
+ if (seek_entry_id_[i] == 0) {
+ seek_entry_id_[i] = id;
+ seek_entry_pos_[i] = pos;
+ return true;
+ }
+ }
+ return false;
+}
+
+uint32_t SeekHead::GetId(int index) const {
+ if (index < 0 || index >= kSeekEntryCount)
+ return UINT_MAX;
+ return seek_entry_id_[index];
+}
+
+uint64_t SeekHead::GetPosition(int index) const {
+ if (index < 0 || index >= kSeekEntryCount)
+ return ULLONG_MAX;
+ return seek_entry_pos_[index];
+}
+
+bool SeekHead::SetSeekEntry(int index, uint32_t id, uint64_t position) {
+ if (index < 0 || index >= kSeekEntryCount)
+ return false;
+ seek_entry_id_[index] = id;
+ seek_entry_pos_[index] = position;
+ return true;
+}
+
+uint64_t SeekHead::MaxEntrySize() const {
+ const uint64_t max_entry_payload_size =
+ EbmlElementSize(libwebm::kMkvSeekID,
+ static_cast<uint64>(UINT64_C(0xffffffff))) +
+ EbmlElementSize(libwebm::kMkvSeekPosition,
+ static_cast<uint64>(UINT64_C(0xffffffffffffffff)));
+ const uint64_t max_entry_size =
+ EbmlMasterElementSize(libwebm::kMkvSeek, max_entry_payload_size) +
+ max_entry_payload_size;
+
+ return max_entry_size;
+}
+
+///////////////////////////////////////////////////////////////
+//
+// SegmentInfo Class
+
+SegmentInfo::SegmentInfo()
+ : duration_(-1.0),
+ muxing_app_(NULL),
+ timecode_scale_(1000000ULL),
+ writing_app_(NULL),
+ date_utc_(LLONG_MIN),
+ duration_pos_(-1) {}
+
+SegmentInfo::~SegmentInfo() {
+ delete[] muxing_app_;
+ delete[] writing_app_;
+}
+
+bool SegmentInfo::Init() {
+ int32_t major;
+ int32_t minor;
+ int32_t build;
+ int32_t revision;
+ GetVersion(&major, &minor, &build, &revision);
+ char temp[256];
+#ifdef _MSC_VER
+ sprintf_s(temp, sizeof(temp) / sizeof(temp[0]), "libwebm-%d.%d.%d.%d", major,
+ minor, build, revision);
+#else
+ snprintf(temp, sizeof(temp) / sizeof(temp[0]), "libwebm-%d.%d.%d.%d", major,
+ minor, build, revision);
+#endif
+
+ const size_t app_len = strlen(temp) + 1;
+
+ delete[] muxing_app_;
+
+ muxing_app_ = new (std::nothrow) char[app_len]; // NOLINT
+ if (!muxing_app_)
+ return false;
+
+#ifdef _MSC_VER
+ strcpy_s(muxing_app_, app_len, temp);
+#else
+ strcpy(muxing_app_, temp);
+#endif
+
+ set_writing_app(temp);
+ if (!writing_app_)
+ return false;
+ return true;
+}
+
+bool SegmentInfo::Finalize(IMkvWriter* writer) const {
+ if (!writer)
+ return false;
+
+ if (duration_ > 0.0) {
+ if (writer->Seekable()) {
+ if (duration_pos_ == -1)
+ return false;
+
+ const int64_t pos = writer->Position();
+
+ if (writer->Position(duration_pos_))
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvDuration,
+ static_cast<float>(duration_)))
+ return false;
+
+ if (writer->Position(pos))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool SegmentInfo::Write(IMkvWriter* writer) {
+ if (!writer || !muxing_app_ || !writing_app_)
+ return false;
+
+ uint64_t size = EbmlElementSize(libwebm::kMkvTimecodeScale,
+ static_cast<uint64>(timecode_scale_));
+ if (duration_ > 0.0)
+ size +=
+ EbmlElementSize(libwebm::kMkvDuration, static_cast<float>(duration_));
+ if (date_utc_ != LLONG_MIN)
+ size += EbmlDateElementSize(libwebm::kMkvDateUTC);
+ size += EbmlElementSize(libwebm::kMkvMuxingApp, muxing_app_);
+ size += EbmlElementSize(libwebm::kMkvWritingApp, writing_app_);
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvInfo, size))
+ return false;
+
+ const int64_t payload_position = writer->Position();
+ if (payload_position < 0)
+ return false;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvTimecodeScale,
+ static_cast<uint64>(timecode_scale_)))
+ return false;
+
+ if (duration_ > 0.0) {
+ // Save for later
+ duration_pos_ = writer->Position();
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvDuration,
+ static_cast<float>(duration_)))
+ return false;
+ }
+
+ if (date_utc_ != LLONG_MIN)
+ WriteEbmlDateElement(writer, libwebm::kMkvDateUTC, date_utc_);
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvMuxingApp, muxing_app_))
+ return false;
+ if (!WriteEbmlElement(writer, libwebm::kMkvWritingApp, writing_app_))
+ return false;
+
+ const int64_t stop_position = writer->Position();
+ if (stop_position < 0 ||
+ stop_position - payload_position != static_cast<int64_t>(size))
+ return false;
+
+ return true;
+}
+
+void SegmentInfo::set_muxing_app(const char* app) {
+ if (app) {
+ const size_t length = strlen(app) + 1;
+ char* temp_str = new (std::nothrow) char[length]; // NOLINT
+ if (!temp_str)
+ return;
+
+#ifdef _MSC_VER
+ strcpy_s(temp_str, length, app);
+#else
+ strcpy(temp_str, app);
+#endif
+
+ delete[] muxing_app_;
+ muxing_app_ = temp_str;
+ }
+}
+
+void SegmentInfo::set_writing_app(const char* app) {
+ if (app) {
+ const size_t length = strlen(app) + 1;
+ char* temp_str = new (std::nothrow) char[length]; // NOLINT
+ if (!temp_str)
+ return;
+
+#ifdef _MSC_VER
+ strcpy_s(temp_str, length, app);
+#else
+ strcpy(temp_str, app);
+#endif
+
+ delete[] writing_app_;
+ writing_app_ = temp_str;
+ }
+}
+
+///////////////////////////////////////////////////////////////
+//
+// Segment Class
+
+Segment::Segment()
+ : chunk_count_(0),
+ chunk_name_(NULL),
+ chunk_writer_cluster_(NULL),
+ chunk_writer_cues_(NULL),
+ chunk_writer_header_(NULL),
+ chunking_(false),
+ chunking_base_name_(NULL),
+ cluster_list_(NULL),
+ cluster_list_capacity_(0),
+ cluster_list_size_(0),
+ cues_position_(kAfterClusters),
+ cues_track_(0),
+ force_new_cluster_(false),
+ frames_(NULL),
+ frames_capacity_(0),
+ frames_size_(0),
+ has_video_(false),
+ header_written_(false),
+ last_block_duration_(0),
+ last_timestamp_(0),
+ max_cluster_duration_(kDefaultMaxClusterDuration),
+ max_cluster_size_(0),
+ mode_(kFile),
+ new_cuepoint_(false),
+ output_cues_(true),
+ accurate_cluster_duration_(false),
+ fixed_size_cluster_timecode_(false),
+ estimate_file_duration_(false),
+ payload_pos_(0),
+ size_position_(0),
+ doc_type_version_(kDefaultDocTypeVersion),
+ doc_type_version_written_(0),
+ duration_(0.0),
+ writer_cluster_(NULL),
+ writer_cues_(NULL),
+ writer_header_(NULL) {
+ const time_t curr_time = time(NULL);
+ seed_ = static_cast<unsigned int>(curr_time);
+#ifdef _WIN32
+ srand(seed_);
+#endif
+}
+
+Segment::~Segment() {
+ if (cluster_list_) {
+ for (int32_t i = 0; i < cluster_list_size_; ++i) {
+ Cluster* const cluster = cluster_list_[i];
+ delete cluster;
+ }
+ delete[] cluster_list_;
+ }
+
+ if (frames_) {
+ for (int32_t i = 0; i < frames_size_; ++i) {
+ Frame* const frame = frames_[i];
+ delete frame;
+ }
+ delete[] frames_;
+ }
+
+ delete[] chunk_name_;
+ delete[] chunking_base_name_;
+
+ if (chunk_writer_cluster_) {
+ chunk_writer_cluster_->Close();
+ delete chunk_writer_cluster_;
+ }
+ if (chunk_writer_cues_) {
+ chunk_writer_cues_->Close();
+ delete chunk_writer_cues_;
+ }
+ if (chunk_writer_header_) {
+ chunk_writer_header_->Close();
+ delete chunk_writer_header_;
+ }
+}
+
+void Segment::MoveCuesBeforeClustersHelper(uint64_t diff, int32_t index,
+ uint64_t* cues_size) {
+ CuePoint* const cue_point = cues_.GetCueByIndex(index);
+ if (cue_point == NULL)
+ return;
+ const uint64_t old_cue_point_size = cue_point->Size();
+ const uint64_t cluster_pos = cue_point->cluster_pos() + diff;
+  cue_point->set_cluster_pos(cluster_pos);  // update to the new cluster position
+ // New size of the cue is computed as follows
+ // Let a = current sum of size of all CuePoints
+ // Let b = Increase in Cue Point's size due to this iteration
+ // Let c = Increase in size of Cues Element's length due to this iteration
+ // (This is computed as CodedSize(a + b) - CodedSize(a))
+ // Let d = b + c. Now d is the |diff| passed to the next recursive call.
+ // Let e = a + b. Now e is the |cues_size| passed to the next recursive
+ // call.
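+  // The recursion terminates because a CuePoint only grows when the coded
+  // size of its cluster position crosses a length boundary, which can happen
+  // at most a handful of times per cue.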
+ const uint64_t cue_point_size_diff = cue_point->Size() - old_cue_point_size;
+ const uint64_t cue_size_diff =
+ GetCodedUIntSize(*cues_size + cue_point_size_diff) -
+ GetCodedUIntSize(*cues_size);
+ *cues_size += cue_point_size_diff;
+ diff = cue_size_diff + cue_point_size_diff;
+ if (diff > 0) {
+ for (int32_t i = 0; i < cues_.cue_entries_size(); ++i) {
+ MoveCuesBeforeClustersHelper(diff, i, cues_size);
+ }
+ }
+}
+
+void Segment::MoveCuesBeforeClusters() {
+ const uint64_t current_cue_size = cues_.Size();
+ uint64_t cue_size = 0;
+ for (int32_t i = 0; i < cues_.cue_entries_size(); ++i)
+ cue_size += cues_.GetCueByIndex(i)->Size();
+ for (int32_t i = 0; i < cues_.cue_entries_size(); ++i)
+ MoveCuesBeforeClustersHelper(current_cue_size, i, &cue_size);
+
+ // Adjust the Seek Entry to reflect the change in position
+ // of Cluster and Cues
+ int32_t cluster_index = 0;
+ int32_t cues_index = 0;
+ for (int32_t i = 0; i < SeekHead::kSeekEntryCount; ++i) {
+ if (seek_head_.GetId(i) == libwebm::kMkvCluster)
+ cluster_index = i;
+ if (seek_head_.GetId(i) == libwebm::kMkvCues)
+ cues_index = i;
+ }
+ seek_head_.SetSeekEntry(cues_index, libwebm::kMkvCues,
+ seek_head_.GetPosition(cluster_index));
+ seek_head_.SetSeekEntry(cluster_index, libwebm::kMkvCluster,
+ cues_.Size() + seek_head_.GetPosition(cues_index));
+}
+
+bool Segment::Init(IMkvWriter* ptr_writer) {
+ if (!ptr_writer) {
+ return false;
+ }
+ writer_cluster_ = ptr_writer;
+ writer_cues_ = ptr_writer;
+ writer_header_ = ptr_writer;
+ memset(&track_frames_written_, 0,
+ sizeof(track_frames_written_[0]) * kMaxTrackNumber);
+ memset(&last_track_timestamp_, 0,
+ sizeof(last_track_timestamp_[0]) * kMaxTrackNumber);
+ return segment_info_.Init();
+}
+
+bool Segment::CopyAndMoveCuesBeforeClusters(mkvparser::IMkvReader* reader,
+ IMkvWriter* writer) {
+ if (!writer->Seekable() || chunking_)
+ return false;
+ const int64_t cluster_offset =
+ cluster_list_[0]->size_position() - GetUIntSize(libwebm::kMkvCluster);
+
+ // Copy the headers.
+ if (!ChunkedCopy(reader, writer, 0, cluster_offset))
+ return false;
+
+ // Recompute cue positions and seek entries.
+ MoveCuesBeforeClusters();
+
+ // Write cues and seek entries.
+ // TODO(vigneshv): As of now, it's safe to call seek_head_.Finalize() for the
+ // second time with a different writer object. But the name Finalize() doesn't
+ // indicate something we want to call more than once. So consider renaming it
+ // to write() or some such.
+ if (!cues_.Write(writer) || !seek_head_.Finalize(writer))
+ return false;
+
+ // Copy the Clusters.
+ if (!ChunkedCopy(reader, writer, cluster_offset,
+ cluster_end_offset_ - cluster_offset))
+ return false;
+
+ // Update the Segment size in case the Cues size has changed.
+ const int64_t pos = writer->Position();
+ const int64_t segment_size = writer->Position() - payload_pos_;
+ if (writer->Position(size_position_) ||
+ WriteUIntSize(writer, segment_size, 8) || writer->Position(pos))
+ return false;
+ return true;
+}
+
+bool Segment::Finalize() {
+ if (WriteFramesAll() < 0)
+ return false;
+
+ // In kLive mode, call Cluster::Finalize only if |accurate_cluster_duration_|
+ // is set. In all other modes, always call Cluster::Finalize.
+ if ((mode_ == kLive ? accurate_cluster_duration_ : true) &&
+ cluster_list_size_ > 0) {
+ // Update last cluster's size
+ Cluster* const old_cluster = cluster_list_[cluster_list_size_ - 1];
+
+    // The last frame of the last Cluster is not written as a BlockGroup with
+    // Duration unless the frame itself has its duration set explicitly.
+ if (!old_cluster || !old_cluster->Finalize(false, 0))
+ return false;
+ }
+
+ if (mode_ == kFile) {
+ if (chunking_ && chunk_writer_cluster_) {
+ chunk_writer_cluster_->Close();
+ chunk_count_++;
+ }
+
+ double duration =
+ (static_cast<double>(last_timestamp_) + last_block_duration_) /
+ segment_info_.timecode_scale();
+ if (duration_ > 0.0) {
+ duration = duration_;
+ } else {
+ if (last_block_duration_ == 0 && estimate_file_duration_) {
+ const int num_tracks = static_cast<int>(tracks_.track_entries_size());
+ for (int i = 0; i < num_tracks; ++i) {
+ if (track_frames_written_[i] < 2)
+ continue;
+
+ // Estimate the duration for the last block of a Track.
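+          // For example, 3 frames with the last at 40ms imply ~20ms per
+          // frame, so the estimated duration is 40ms + 20ms = 60ms (before
+          // dividing by the timecode scale).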
+ const double nano_per_frame =
+ static_cast<double>(last_track_timestamp_[i]) /
+ (track_frames_written_[i] - 1);
+ const double track_duration =
+ (last_track_timestamp_[i] + nano_per_frame) /
+ segment_info_.timecode_scale();
+ if (track_duration > duration)
+ duration = track_duration;
+ }
+ }
+ }
+ segment_info_.set_duration(duration);
+ if (!segment_info_.Finalize(writer_header_))
+ return false;
+
+ if (output_cues_)
+ if (!seek_head_.AddSeekEntry(libwebm::kMkvCues, MaxOffset()))
+ return false;
+
+ if (chunking_) {
+ if (!chunk_writer_cues_)
+ return false;
+
+ char* name = NULL;
+ if (!UpdateChunkName("cues", &name))
+ return false;
+
+ const bool cues_open = chunk_writer_cues_->Open(name);
+ delete[] name;
+ if (!cues_open)
+ return false;
+ }
+
+ cluster_end_offset_ = writer_cluster_->Position();
+
+ // Write the seek headers and cues
+ if (output_cues_)
+ if (!cues_.Write(writer_cues_))
+ return false;
+
+ if (!seek_head_.Finalize(writer_header_))
+ return false;
+
+ if (writer_header_->Seekable()) {
+ if (size_position_ == -1)
+ return false;
+
+ const int64_t segment_size = MaxOffset();
+ if (segment_size < 1)
+ return false;
+
+ const int64_t pos = writer_header_->Position();
+ UpdateDocTypeVersion();
+ if (doc_type_version_ != doc_type_version_written_) {
+ if (writer_header_->Position(0))
+ return false;
+
+ const char* const doc_type =
+ DocTypeIsWebm() ? kDocTypeWebm : kDocTypeMatroska;
+ if (!WriteEbmlHeader(writer_header_, doc_type_version_, doc_type))
+ return false;
+ if (writer_header_->Position() != ebml_header_size_)
+ return false;
+
+ doc_type_version_written_ = doc_type_version_;
+ }
+
+ if (writer_header_->Position(size_position_))
+ return false;
+
+ if (WriteUIntSize(writer_header_, segment_size, 8))
+ return false;
+
+ if (writer_header_->Position(pos))
+ return false;
+ }
+
+ if (chunking_) {
+ // Do not close any writers until the segment size has been written,
+ // otherwise the size may be off.
+ if (!chunk_writer_cues_ || !chunk_writer_header_)
+ return false;
+
+ chunk_writer_cues_->Close();
+ chunk_writer_header_->Close();
+ }
+ }
+
+ return true;
+}
+
+Track* Segment::AddTrack(int32_t number) {
+ Track* const track = new (std::nothrow) Track(&seed_); // NOLINT
+
+ if (!track)
+ return NULL;
+
+ if (!tracks_.AddTrack(track, number)) {
+ delete track;
+ return NULL;
+ }
+
+ return track;
+}
+
+Chapter* Segment::AddChapter() { return chapters_.AddChapter(&seed_); }
+
+Tag* Segment::AddTag() { return tags_.AddTag(); }
+
+uint64_t Segment::AddVideoTrack(int32_t width, int32_t height, int32_t number) {
+ VideoTrack* const track = new (std::nothrow) VideoTrack(&seed_); // NOLINT
+ if (!track)
+ return 0;
+
+ track->set_type(Tracks::kVideo);
+ track->set_codec_id(Tracks::kVp8CodecId);
+ track->set_width(width);
+ track->set_height(height);
+
+ if (!tracks_.AddTrack(track, number)) {
+ delete track;
+ return 0;
+ }
+ has_video_ = true;
+
+ return track->number();
+}
+
+bool Segment::AddCuePoint(uint64_t timestamp, uint64_t track) {
+ if (cluster_list_size_ < 1)
+ return false;
+
+ const Cluster* const cluster = cluster_list_[cluster_list_size_ - 1];
+ if (!cluster)
+ return false;
+
+ CuePoint* const cue = new (std::nothrow) CuePoint(); // NOLINT
+ if (!cue)
+ return false;
+
+ cue->set_time(timestamp / segment_info_.timecode_scale());
+ cue->set_block_number(cluster->blocks_added());
+ cue->set_cluster_pos(cluster->position_for_cues());
+ cue->set_track(track);
+ if (!cues_.AddCue(cue)) {
+ delete cue;
+ return false;
+ }
+
+ new_cuepoint_ = false;
+ return true;
+}
+
+uint64_t Segment::AddAudioTrack(int32_t sample_rate, int32_t channels,
+ int32_t number) {
+ AudioTrack* const track = new (std::nothrow) AudioTrack(&seed_); // NOLINT
+ if (!track)
+ return 0;
+
+ track->set_type(Tracks::kAudio);
+ track->set_codec_id(Tracks::kVorbisCodecId);
+ track->set_sample_rate(sample_rate);
+ track->set_channels(channels);
+
+ if (!tracks_.AddTrack(track, number)) {
+ delete track;
+ return 0;
+ }
+
+ return track->number();
+}
+
+bool Segment::AddFrame(const uint8_t* data, uint64_t length,
+ uint64_t track_number, uint64_t timestamp, bool is_key) {
+ if (!data)
+ return false;
+
+ Frame frame;
+ if (!frame.Init(data, length))
+ return false;
+ frame.set_track_number(track_number);
+ frame.set_timestamp(timestamp);
+ frame.set_is_key(is_key);
+ return AddGenericFrame(&frame);
+}
+
+bool Segment::AddFrameWithAdditional(const uint8_t* data, uint64_t length,
+ const uint8_t* additional,
+ uint64_t additional_length,
+ uint64_t add_id, uint64_t track_number,
+ uint64_t timestamp, bool is_key) {
+ if (!data || !additional)
+ return false;
+
+ Frame frame;
+ if (!frame.Init(data, length) ||
+ !frame.AddAdditionalData(additional, additional_length, add_id)) {
+ return false;
+ }
+ frame.set_track_number(track_number);
+ frame.set_timestamp(timestamp);
+ frame.set_is_key(is_key);
+ return AddGenericFrame(&frame);
+}
+
+bool Segment::AddFrameWithDiscardPadding(const uint8_t* data, uint64_t length,
+ int64_t discard_padding,
+ uint64_t track_number,
+ uint64_t timestamp, bool is_key) {
+ if (!data)
+ return false;
+
+ Frame frame;
+ if (!frame.Init(data, length))
+ return false;
+ frame.set_discard_padding(discard_padding);
+ frame.set_track_number(track_number);
+ frame.set_timestamp(timestamp);
+ frame.set_is_key(is_key);
+ return AddGenericFrame(&frame);
+}
+
+bool Segment::AddMetadata(const uint8_t* data, uint64_t length,
+ uint64_t track_number, uint64_t timestamp_ns,
+ uint64_t duration_ns) {
+ if (!data)
+ return false;
+
+ Frame frame;
+ if (!frame.Init(data, length))
+ return false;
+ frame.set_track_number(track_number);
+ frame.set_timestamp(timestamp_ns);
+ frame.set_duration(duration_ns);
+ frame.set_is_key(true); // All metadata blocks are keyframes.
+ return AddGenericFrame(&frame);
+}
+
+bool Segment::AddGenericFrame(const Frame* frame) {
+ if (!frame)
+ return false;
+
+ if (!CheckHeaderInfo())
+ return false;
+
+ // Check for non-monotonically increasing timestamps.
+ if (frame->timestamp() < last_timestamp_)
+ return false;
+
+ // Check if the track number is valid.
+ if (!tracks_.GetTrackByNumber(frame->track_number()))
+ return false;
+
+ if (frame->discard_padding() != 0)
+ doc_type_version_ = 4;
+
+ if (cluster_list_size_ > 0) {
+ const uint64_t timecode_scale = segment_info_.timecode_scale();
+ const uint64_t frame_timecode = frame->timestamp() / timecode_scale;
+
+ const Cluster* const last_cluster = cluster_list_[cluster_list_size_ - 1];
+ const uint64_t last_cluster_timecode = last_cluster->timecode();
+
+ const uint64_t rel_timecode = frame_timecode - last_cluster_timecode;
+ if (rel_timecode > kMaxBlockTimecode) {
+ force_new_cluster_ = true;
+ }
+ }
+
+  // If the segment has a video track, hold onto audio frames to make sure
+  // the audio that is associated with the start time of a video key-frame is
+  // muxed into the same cluster.
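+  // For example, audio frames at 95 and 98 stay queued until the next video
+  // frame arrives, so the cluster boundary can be decided with the video
+  // key-frame in hand.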
+ if (has_video_ && tracks_.TrackIsAudio(frame->track_number()) &&
+ !force_new_cluster_) {
+ Frame* const new_frame = new (std::nothrow) Frame();
+ if (!new_frame || !new_frame->CopyFrom(*frame)) {
+ delete new_frame;
+ return false;
+ }
+ if (!QueueFrame(new_frame)) {
+ delete new_frame;
+ return false;
+ }
+ track_frames_written_[frame->track_number() - 1]++;
+ return true;
+ }
+
+ if (!DoNewClusterProcessing(frame->track_number(), frame->timestamp(),
+ frame->is_key())) {
+ return false;
+ }
+
+ if (cluster_list_size_ < 1)
+ return false;
+
+ Cluster* const cluster = cluster_list_[cluster_list_size_ - 1];
+ if (!cluster)
+ return false;
+
+ // If the Frame is not a SimpleBlock, then set the reference_block_timestamp
+ // if it is not set already.
+ bool frame_created = false;
+ if (!frame->CanBeSimpleBlock() && !frame->is_key() &&
+ !frame->reference_block_timestamp_set()) {
+ Frame* const new_frame = new (std::nothrow) Frame();
+ if (!new_frame || !new_frame->CopyFrom(*frame)) {
+ delete new_frame;
+ return false;
+ }
+ new_frame->set_reference_block_timestamp(
+ last_track_timestamp_[frame->track_number() - 1]);
+ frame = new_frame;
+ frame_created = true;
+ }
+
+ if (!cluster->AddFrame(frame))
+ return false;
+
+ if (new_cuepoint_ && cues_track_ == frame->track_number()) {
+ if (!AddCuePoint(frame->timestamp(), cues_track_))
+ return false;
+ }
+
+ last_timestamp_ = frame->timestamp();
+ last_track_timestamp_[frame->track_number() - 1] = frame->timestamp();
+ last_block_duration_ = frame->duration();
+ track_frames_written_[frame->track_number() - 1]++;
+
+ if (frame_created)
+ delete frame;
+ return true;
+}
+
+void Segment::OutputCues(bool output_cues) { output_cues_ = output_cues; }
+
+void Segment::AccurateClusterDuration(bool accurate_cluster_duration) {
+ accurate_cluster_duration_ = accurate_cluster_duration;
+}
+
+void Segment::UseFixedSizeClusterTimecode(bool fixed_size_cluster_timecode) {
+ fixed_size_cluster_timecode_ = fixed_size_cluster_timecode;
+}
+
+bool Segment::SetChunking(bool chunking, const char* filename) {
+ if (chunk_count_ > 0)
+ return false;
+
+ if (chunking) {
+ if (!filename)
+ return false;
+
+    // If chunking is already enabled with the same base name, there is
+    // nothing to do.
+ if (chunking_ && !strcmp(filename, chunking_base_name_))
+ return true;
+
+ const size_t name_length = strlen(filename) + 1;
+ char* const temp = new (std::nothrow) char[name_length]; // NOLINT
+ if (!temp)
+ return false;
+
+#ifdef _MSC_VER
+ strcpy_s(temp, name_length, filename);
+#else
+ strcpy(temp, filename);
+#endif
+
+ delete[] chunking_base_name_;
+ chunking_base_name_ = temp;
+
+ if (!UpdateChunkName("chk", &chunk_name_))
+ return false;
+
+ if (!chunk_writer_cluster_) {
+ chunk_writer_cluster_ = new (std::nothrow) MkvWriter(); // NOLINT
+ if (!chunk_writer_cluster_)
+ return false;
+ }
+
+ if (!chunk_writer_cues_) {
+ chunk_writer_cues_ = new (std::nothrow) MkvWriter(); // NOLINT
+ if (!chunk_writer_cues_)
+ return false;
+ }
+
+ if (!chunk_writer_header_) {
+ chunk_writer_header_ = new (std::nothrow) MkvWriter(); // NOLINT
+ if (!chunk_writer_header_)
+ return false;
+ }
+
+ if (!chunk_writer_cluster_->Open(chunk_name_))
+ return false;
+
+ const size_t header_length = strlen(filename) + strlen(".hdr") + 1;
+ char* const header = new (std::nothrow) char[header_length]; // NOLINT
+ if (!header)
+ return false;
+
+#ifdef _MSC_VER
+ strcpy_s(header, header_length - strlen(".hdr"), chunking_base_name_);
+ strcat_s(header, header_length, ".hdr");
+#else
+ strcpy(header, chunking_base_name_);
+ strcat(header, ".hdr");
+#endif
+ if (!chunk_writer_header_->Open(header)) {
+ delete[] header;
+ return false;
+ }
+
+ writer_cluster_ = chunk_writer_cluster_;
+ writer_cues_ = chunk_writer_cues_;
+ writer_header_ = chunk_writer_header_;
+
+ delete[] header;
+ }
+
+ chunking_ = chunking;
+
+ return true;
+}
+
+bool Segment::CuesTrack(uint64_t track_number) {
+ const Track* const track = GetTrackByNumber(track_number);
+ if (!track)
+ return false;
+
+ cues_track_ = track_number;
+ return true;
+}
+
+void Segment::ForceNewClusterOnNextFrame() { force_new_cluster_ = true; }
+
+Track* Segment::GetTrackByNumber(uint64_t track_number) const {
+ return tracks_.GetTrackByNumber(track_number);
+}
+
+bool Segment::WriteSegmentHeader() {
+ UpdateDocTypeVersion();
+
+ const char* const doc_type =
+ DocTypeIsWebm() ? kDocTypeWebm : kDocTypeMatroska;
+ if (!WriteEbmlHeader(writer_header_, doc_type_version_, doc_type))
+ return false;
+ doc_type_version_written_ = doc_type_version_;
+ ebml_header_size_ = static_cast<int32_t>(writer_header_->Position());
+
+ // Write "unknown" (-1) as segment size value. If mode is kFile, Segment
+ // will write over duration when the file is finalized.
+ if (WriteID(writer_header_, libwebm::kMkvSegment))
+ return false;
+
+ // Save for later.
+ size_position_ = writer_header_->Position();
+
+ // Write "unknown" (EBML coded -1) as segment size value. We need to write 8
+ // bytes because if we are going to overwrite the segment size later we do
+ // not know how big our segment will be.
+ if (SerializeInt(writer_header_, kEbmlUnknownValue, 8))
+ return false;
+
+ payload_pos_ = writer_header_->Position();
+
+ if (mode_ == kFile && writer_header_->Seekable()) {
+    // Set the duration > 0.0 so SegmentInfo will write out the duration. When
+    // the muxer is done writing, we will set the correct duration and have
+    // SegmentInfo update it.
+ segment_info_.set_duration(1.0);
+
+ if (!seek_head_.Write(writer_header_))
+ return false;
+ }
+
+ if (!seek_head_.AddSeekEntry(libwebm::kMkvInfo, MaxOffset()))
+ return false;
+ if (!segment_info_.Write(writer_header_))
+ return false;
+
+ if (!seek_head_.AddSeekEntry(libwebm::kMkvTracks, MaxOffset()))
+ return false;
+ if (!tracks_.Write(writer_header_))
+ return false;
+
+ if (chapters_.Count() > 0) {
+ if (!seek_head_.AddSeekEntry(libwebm::kMkvChapters, MaxOffset()))
+ return false;
+ if (!chapters_.Write(writer_header_))
+ return false;
+ }
+
+ if (tags_.Count() > 0) {
+ if (!seek_head_.AddSeekEntry(libwebm::kMkvTags, MaxOffset()))
+ return false;
+ if (!tags_.Write(writer_header_))
+ return false;
+ }
+
+ if (chunking_ && (mode_ == kLive || !writer_header_->Seekable())) {
+ if (!chunk_writer_header_)
+ return false;
+
+ chunk_writer_header_->Close();
+ }
+
+ header_written_ = true;
+
+ return true;
+}
+
+// Here we are testing whether to create a new cluster, given a frame
+// having time frame_timestamp_ns.
+//
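+// Returns -1 on error, 0 to write the frame into the current cluster, 1 to
+// create a new cluster and write the frame there, and 2 when the frame's
+// timecode is too far ahead of the current cluster (the caller creates a new
+// cluster and runs the test again).
+//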
+int Segment::TestFrame(uint64_t track_number, uint64_t frame_timestamp_ns,
+ bool is_key) const {
+ if (force_new_cluster_)
+ return 1;
+
+ // If no clusters have been created yet, then create a new cluster
+ // and write this frame immediately, in the new cluster. This path
+ // should only be followed once, the first time we attempt to write
+ // a frame.
+
+ if (cluster_list_size_ <= 0)
+ return 1;
+
+ // There exists at least one cluster. We must compare the frame to
+ // the last cluster, in order to determine whether the frame is
+ // written to the existing cluster, or that a new cluster should be
+ // created.
+
+ const uint64_t timecode_scale = segment_info_.timecode_scale();
+ const uint64_t frame_timecode = frame_timestamp_ns / timecode_scale;
+
+ const Cluster* const last_cluster = cluster_list_[cluster_list_size_ - 1];
+ const uint64_t last_cluster_timecode = last_cluster->timecode();
+
+ // For completeness we test for the case when the frame's timecode
+ // is less than the cluster's timecode. Although in principle that
+ // is allowed, this muxer doesn't actually write clusters like that,
+ // so this indicates a bug somewhere in our algorithm.
+
+ if (frame_timecode < last_cluster_timecode) // should never happen
+ return -1;
+
+ // If the frame has a timestamp significantly larger than the last
+ // cluster (in Matroska, cluster-relative timestamps are serialized
+ // using a 16-bit signed integer), then we cannot write this frame
+ // to that cluster, and so we must create a new cluster.
+
+ const int64_t delta_timecode = frame_timecode - last_cluster_timecode;
+
+ if (delta_timecode > kMaxBlockTimecode)
+ return 2;
+
+ // We decide to create a new cluster when we have a video keyframe.
+ // This will flush queued (audio) frames, and write the keyframe
+ // immediately, in the newly-created cluster.
+
+ if (is_key && tracks_.TrackIsVideo(track_number))
+ return 1;
+
+ // Create a new cluster if we have accumulated too many frames
+ // already, where "too many" is defined as "the total time of frames
+ // in the cluster exceeds a threshold".
+
+ const uint64_t delta_ns = delta_timecode * timecode_scale;
+
+ if (max_cluster_duration_ > 0 && delta_ns >= max_cluster_duration_)
+ return 1;
+
+ // This is similar to the case above, with the difference that a new
+ // cluster is created when the size of the current cluster exceeds a
+ // threshold.
+
+ const uint64_t cluster_size = last_cluster->payload_size();
+
+ if (max_cluster_size_ > 0 && cluster_size >= max_cluster_size_)
+ return 1;
+
+ // There's no need to create a new cluster, so emit this frame now.
+
+ return 0;
+}
+
+bool Segment::MakeNewCluster(uint64_t frame_timestamp_ns) {
+ const int32_t new_size = cluster_list_size_ + 1;
+
+ if (new_size > cluster_list_capacity_) {
+    // Grow the cluster list.
+ const int32_t new_capacity =
+ (cluster_list_capacity_ <= 0) ? 1 : cluster_list_capacity_ * 2;
+ Cluster** const clusters =
+ new (std::nothrow) Cluster*[new_capacity]; // NOLINT
+ if (!clusters)
+ return false;
+
+ for (int32_t i = 0; i < cluster_list_size_; ++i) {
+ clusters[i] = cluster_list_[i];
+ }
+
+ delete[] cluster_list_;
+
+ cluster_list_ = clusters;
+ cluster_list_capacity_ = new_capacity;
+ }
+
+ if (!WriteFramesLessThan(frame_timestamp_ns))
+ return false;
+
+ if (cluster_list_size_ > 0) {
+ // Update old cluster's size
+ Cluster* const old_cluster = cluster_list_[cluster_list_size_ - 1];
+
+ if (!old_cluster || !old_cluster->Finalize(true, frame_timestamp_ns))
+ return false;
+ }
+
+ if (output_cues_)
+ new_cuepoint_ = true;
+
+ if (chunking_ && cluster_list_size_ > 0) {
+ chunk_writer_cluster_->Close();
+ chunk_count_++;
+
+ if (!UpdateChunkName("chk", &chunk_name_))
+ return false;
+ if (!chunk_writer_cluster_->Open(chunk_name_))
+ return false;
+ }
+
+ const uint64_t timecode_scale = segment_info_.timecode_scale();
+ const uint64_t frame_timecode = frame_timestamp_ns / timecode_scale;
+
+ uint64_t cluster_timecode = frame_timecode;
+
+ if (frames_size_ > 0) {
+ const Frame* const f = frames_[0]; // earliest queued frame
+ const uint64_t ns = f->timestamp();
+ const uint64_t tc = ns / timecode_scale;
+
+ if (tc < cluster_timecode)
+ cluster_timecode = tc;
+ }
+
+ Cluster*& cluster = cluster_list_[cluster_list_size_];
+ const int64_t offset = MaxOffset();
+ cluster = new (std::nothrow)
+ Cluster(cluster_timecode, offset, segment_info_.timecode_scale(),
+ accurate_cluster_duration_, fixed_size_cluster_timecode_);
+ if (!cluster)
+ return false;
+
+ if (!cluster->Init(writer_cluster_))
+ return false;
+
+ cluster_list_size_ = new_size;
+ return true;
+}
+
+bool Segment::DoNewClusterProcessing(uint64_t track_number,
+ uint64_t frame_timestamp_ns, bool is_key) {
+ for (;;) {
+ // Based on the characteristics of the current frame and current
+ // cluster, decide whether to create a new cluster.
+ const int result = TestFrame(track_number, frame_timestamp_ns, is_key);
+ if (result < 0) // error
+ return false;
+
+ // Always set force_new_cluster_ to false after TestFrame.
+ force_new_cluster_ = false;
+
+ // A non-zero result means create a new cluster.
+ if (result > 0 && !MakeNewCluster(frame_timestamp_ns))
+ return false;
+
+ // Write queued (audio) frames.
+ const int frame_count = WriteFramesAll();
+ if (frame_count < 0) // error
+ return false;
+
+ // Write the current frame to the current cluster (if TestFrame
+ // returns 0) or to a newly created cluster (TestFrame returns 1).
+ if (result <= 1)
+ return true;
+
+ // TestFrame returned 2, which means there was a large time
+ // difference between the cluster and the frame itself. Do the
+ // test again, comparing the frame to the new cluster.
+ }
+}
+
+bool Segment::CheckHeaderInfo() {
+ if (!header_written_) {
+ if (!WriteSegmentHeader())
+ return false;
+
+ if (!seek_head_.AddSeekEntry(libwebm::kMkvCluster, MaxOffset()))
+ return false;
+
+ if (output_cues_ && cues_track_ == 0) {
+ // Check for a video track
+ for (uint32_t i = 0; i < tracks_.track_entries_size(); ++i) {
+ const Track* const track = tracks_.GetTrackByIndex(i);
+ if (!track)
+ return false;
+
+ if (tracks_.TrackIsVideo(track->number())) {
+ cues_track_ = track->number();
+ break;
+ }
+ }
+
+ // Set first track found
+ if (cues_track_ == 0) {
+ const Track* const track = tracks_.GetTrackByIndex(0);
+ if (!track)
+ return false;
+
+ cues_track_ = track->number();
+ }
+ }
+ }
+ return true;
+}
+
+void Segment::UpdateDocTypeVersion() {
+ for (uint32_t index = 0; index < tracks_.track_entries_size(); ++index) {
+ const Track* track = tracks_.GetTrackByIndex(index);
+ if (track == NULL)
+ break;
+ if ((track->codec_delay() || track->seek_pre_roll()) &&
+ doc_type_version_ < 4) {
+ doc_type_version_ = 4;
+ break;
+ }
+ }
+}
+
+bool Segment::UpdateChunkName(const char* ext, char** name) const {
+ if (!name || !ext)
+ return false;
+
+ char ext_chk[64];
+#ifdef _MSC_VER
+ sprintf_s(ext_chk, sizeof(ext_chk), "_%06d.%s", chunk_count_, ext);
+#else
+ snprintf(ext_chk, sizeof(ext_chk), "_%06d.%s", chunk_count_, ext);
+#endif
+
+ const size_t length = strlen(chunking_base_name_) + strlen(ext_chk) + 1;
+ char* const str = new (std::nothrow) char[length]; // NOLINT
+ if (!str)
+ return false;
+
+#ifdef _MSC_VER
+ strcpy_s(str, length - strlen(ext_chk), chunking_base_name_);
+ strcat_s(str, length, ext_chk);
+#else
+ strcpy(str, chunking_base_name_);
+ strcat(str, ext_chk);
+#endif
+
+  delete[] *name;
+ *name = str;
+
+ return true;
+}
+
+int64_t Segment::MaxOffset() {
+ if (!writer_header_)
+ return -1;
+
+ int64_t offset = writer_header_->Position() - payload_pos_;
+
+ if (chunking_) {
+ for (int32_t i = 0; i < cluster_list_size_; ++i) {
+ Cluster* const cluster = cluster_list_[i];
+ offset += cluster->Size();
+ }
+
+ if (writer_cues_)
+ offset += writer_cues_->Position();
+ }
+
+ return offset;
+}
+
+bool Segment::QueueFrame(Frame* frame) {
+ const int32_t new_size = frames_size_ + 1;
+
+ if (new_size > frames_capacity_) {
+    // Grow the frame queue.
+ const int32_t new_capacity = (!frames_capacity_) ? 2 : frames_capacity_ * 2;
+
+ if (new_capacity < 1)
+ return false;
+
+ Frame** const frames = new (std::nothrow) Frame*[new_capacity]; // NOLINT
+ if (!frames)
+ return false;
+
+ for (int32_t i = 0; i < frames_size_; ++i) {
+ frames[i] = frames_[i];
+ }
+
+ delete[] frames_;
+ frames_ = frames;
+ frames_capacity_ = new_capacity;
+ }
+
+ frames_[frames_size_++] = frame;
+
+ return true;
+}
+
+int Segment::WriteFramesAll() {
+ if (frames_ == NULL)
+ return 0;
+
+ if (cluster_list_size_ < 1)
+ return -1;
+
+ Cluster* const cluster = cluster_list_[cluster_list_size_ - 1];
+
+ if (!cluster)
+ return -1;
+
+ for (int32_t i = 0; i < frames_size_; ++i) {
+ Frame*& frame = frames_[i];
+ // TODO(jzern/vigneshv): using Segment::AddGenericFrame here would limit the
+ // places where |doc_type_version_| needs to be updated.
+ if (frame->discard_padding() != 0)
+ doc_type_version_ = 4;
+ if (!cluster->AddFrame(frame))
+ return -1;
+
+ if (new_cuepoint_ && cues_track_ == frame->track_number()) {
+ if (!AddCuePoint(frame->timestamp(), cues_track_))
+ return -1;
+ }
+
+ if (frame->timestamp() > last_timestamp_) {
+ last_timestamp_ = frame->timestamp();
+ last_track_timestamp_[frame->track_number() - 1] = frame->timestamp();
+ }
+
+ delete frame;
+ frame = NULL;
+ }
+
+ const int result = frames_size_;
+ frames_size_ = 0;
+
+ return result;
+}
+
+bool Segment::WriteFramesLessThan(uint64_t timestamp) {
+  // Check |cluster_list_size_| to see if this is the first cluster. If it is
+  // the first cluster, the audio frames with timestamps less than the first
+  // video timestamp will be written in a later step.
+ if (frames_size_ > 0 && cluster_list_size_ > 0) {
+ if (!frames_)
+ return false;
+
+ Cluster* const cluster = cluster_list_[cluster_list_size_ - 1];
+ if (!cluster)
+ return false;
+
+ int32_t shift_left = 0;
+
+ // TODO(fgalligan): Change this to use the durations of frames instead of
+ // the next frame's start time if the duration is accurate.
+ for (int32_t i = 1; i < frames_size_; ++i) {
+ const Frame* const frame_curr = frames_[i];
+
+ if (frame_curr->timestamp() > timestamp)
+ break;
+
+ const Frame* const frame_prev = frames_[i - 1];
+ if (frame_prev->discard_padding() != 0)
+ doc_type_version_ = 4;
+ if (!cluster->AddFrame(frame_prev))
+ return false;
+
+ if (new_cuepoint_ && cues_track_ == frame_prev->track_number()) {
+ if (!AddCuePoint(frame_prev->timestamp(), cues_track_))
+ return false;
+ }
+
+ ++shift_left;
+ if (frame_prev->timestamp() > last_timestamp_) {
+ last_timestamp_ = frame_prev->timestamp();
+ last_track_timestamp_[frame_prev->track_number() - 1] =
+ frame_prev->timestamp();
+ }
+
+ delete frame_prev;
+ }
+
+ if (shift_left > 0) {
+ if (shift_left >= frames_size_)
+ return false;
+
+ const int32_t new_frames_size = frames_size_ - shift_left;
+ for (int32_t i = 0; i < new_frames_size; ++i) {
+ frames_[i] = frames_[i + shift_left];
+ }
+
+ frames_size_ = new_frames_size;
+ }
+ }
+
+ return true;
+}
+
+bool Segment::DocTypeIsWebm() const {
+ const int kNumCodecIds = 10;
+
+ // TODO(vigneshv): Tweak .clang-format.
+ const char* kWebmCodecIds[kNumCodecIds] = {
+ Tracks::kOpusCodecId, Tracks::kVorbisCodecId,
+ Tracks::kVp8CodecId, Tracks::kVp9CodecId,
+ Tracks::kVp10CodecId, Tracks::kAV1CodecId,
+ Tracks::kWebVttCaptionsId, Tracks::kWebVttDescriptionsId,
+ Tracks::kWebVttMetadataId, Tracks::kWebVttSubtitlesId};
+
+ const int num_tracks = static_cast<int>(tracks_.track_entries_size());
+ for (int track_index = 0; track_index < num_tracks; ++track_index) {
+ const Track* const track = tracks_.GetTrackByIndex(track_index);
+ const std::string codec_id = track->codec_id();
+
+ bool id_is_webm = false;
+ for (int id_index = 0; id_index < kNumCodecIds; ++id_index) {
+ if (codec_id == kWebmCodecIds[id_index]) {
+ id_is_webm = true;
+ break;
+ }
+ }
+
+ if (!id_is_webm)
+ return false;
+ }
+
+ return true;
+}
+
+} // namespace mkvmuxer
diff --git a/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxer.h b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxer.h
new file mode 100644
index 000000000..9e817bced
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxer.h
@@ -0,0 +1,1922 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#ifndef MKVMUXER_MKVMUXER_H_
+#define MKVMUXER_MKVMUXER_H_
+
+#include <stdint.h>
+
+#include <cstddef>
+#include <list>
+#include <map>
+
+#include "common/webmids.h"
+#include "mkvmuxer/mkvmuxertypes.h"
+
+// For a description of the WebM elements see
+// http://www.webmproject.org/code/specs/container/.
+
+namespace mkvparser {
+class IMkvReader;
+} // namespace mkvparser
+
+namespace mkvmuxer {
+
+class MkvWriter;
+class Segment;
+
+const uint64_t kMaxTrackNumber = 126;
+
+///////////////////////////////////////////////////////////////
+// Interface used by the mkvmuxer to write out the Mkv data.
+class IMkvWriter {
+ public:
+ // Writes out |len| bytes of |buf|. Returns 0 on success.
+ virtual int32 Write(const void* buf, uint32 len) = 0;
+
+ // Returns the offset of the output position from the beginning of the
+ // output.
+ virtual int64 Position() const = 0;
+
+ // Set the current File position. Returns 0 on success.
+ virtual int32 Position(int64 position) = 0;
+
+ // Returns true if the writer is seekable.
+ virtual bool Seekable() const = 0;
+
+ // Element start notification. Called whenever an element identifier is about
+ // to be written to the stream. |element_id| is the element identifier, and
+ // |position| is the location in the WebM stream where the first octet of the
+ // element identifier will be written.
+  // Note: the |MkvId| enumeration in common/webmids.h defines element values.
+ virtual void ElementStartNotify(uint64 element_id, int64 position) = 0;
+
+ protected:
+ IMkvWriter();
+ virtual ~IMkvWriter();
+
+ private:
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(IMkvWriter);
+};
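+
+// A minimal seekable in-memory writer might look like the following sketch
+// (illustrative only; requires <vector> and <algorithm>. The library's
+// MkvWriter class provides the file-backed implementation):
+//
+//   class MemoryWriter : public IMkvWriter {
+//    public:
+//     MemoryWriter() : pos_(0) {}
+//     int32 Write(const void* buf, uint32 len) {
+//       const uint8_t* const p = static_cast<const uint8_t*>(buf);
+//       if (pos_ + len > data_.size())
+//         data_.resize(pos_ + len);
+//       std::copy(p, p + len, data_.begin() + pos_);
+//       pos_ += len;
+//       return 0;  // 0 signals success.
+//     }
+//     int64 Position() const { return static_cast<int64>(pos_); }
+//     int32 Position(int64 position) {
+//       pos_ = static_cast<size_t>(position);
+//       return 0;
+//     }
+//     bool Seekable() const { return true; }
+//     void ElementStartNotify(uint64, int64) {}
+//
+//    private:
+//     std::vector<uint8_t> data_;
+//     size_t pos_;
+//   };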
+
+// Writes out the EBML header for a WebM file, but allows caller to specify
+// DocType. This function must be called before any other libwebm writing
+// functions are called.
+bool WriteEbmlHeader(IMkvWriter* writer, uint64_t doc_type_version,
+ const char* const doc_type);
+
+// Writes out the EBML header for a WebM file. This function must be called
+// before any other libwebm writing functions are called.
+bool WriteEbmlHeader(IMkvWriter* writer, uint64_t doc_type_version);
+
+// Deprecated. Writes out EBML header with doc_type_version as
+// kDefaultDocTypeVersion. Exists for backward compatibility.
+bool WriteEbmlHeader(IMkvWriter* writer);
+
+// Copies |size| bytes from |source| to |dst|, starting at byte position
+// |start|.
+bool ChunkedCopy(mkvparser::IMkvReader* source, IMkvWriter* dst, int64_t start,
+ int64_t size);
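+
+// Typical muxing flow (illustrative sketch only; assumes a seekable writer
+// and omits error handling):
+//
+//   mkvmuxer::MkvWriter writer;
+//   writer.Open("out.webm");
+//   mkvmuxer::Segment segment;
+//   segment.Init(&writer);
+//   const uint64_t video_track = segment.AddVideoTrack(640, 480, 0);
+//   segment.AddFrame(frame_data, frame_size, video_track, timestamp_ns,
+//                    true /* is_key */);
+//   segment.Finalize();
+//   writer.Close();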
+
+///////////////////////////////////////////////////////////////
+// Class to hold data that will be written to a block.
+class Frame {
+ public:
+ Frame();
+ ~Frame();
+
+ // Sets this frame's contents based on |frame|. Returns true on success. On
+ // failure, this frame's existing contents may be lost.
+ bool CopyFrom(const Frame& frame);
+
+ // Copies |frame| data into |frame_|. Returns true on success.
+ bool Init(const uint8_t* frame, uint64_t length);
+
+ // Copies |additional| data into |additional_|. Returns true on success.
+ bool AddAdditionalData(const uint8_t* additional, uint64_t length,
+ uint64_t add_id);
+
+ // Returns true if the frame has valid parameters.
+ bool IsValid() const;
+
+ // Returns true if the frame can be written as a SimpleBlock based on current
+ // parameters.
+ bool CanBeSimpleBlock() const;
+
+ uint64_t add_id() const { return add_id_; }
+ const uint8_t* additional() const { return additional_; }
+ uint64_t additional_length() const { return additional_length_; }
+ void set_duration(uint64_t duration);
+ uint64_t duration() const { return duration_; }
+ bool duration_set() const { return duration_set_; }
+ const uint8_t* frame() const { return frame_; }
+ void set_is_key(bool key) { is_key_ = key; }
+ bool is_key() const { return is_key_; }
+ uint64_t length() const { return length_; }
+ void set_track_number(uint64_t track_number) { track_number_ = track_number; }
+ uint64_t track_number() const { return track_number_; }
+ void set_timestamp(uint64_t timestamp) { timestamp_ = timestamp; }
+ uint64_t timestamp() const { return timestamp_; }
+ void set_discard_padding(int64_t discard_padding) {
+ discard_padding_ = discard_padding;
+ }
+ int64_t discard_padding() const { return discard_padding_; }
+ void set_reference_block_timestamp(int64_t reference_block_timestamp);
+ int64_t reference_block_timestamp() const {
+ return reference_block_timestamp_;
+ }
+ bool reference_block_timestamp_set() const {
+ return reference_block_timestamp_set_;
+ }
+
+ private:
+ // Id of the Additional data.
+ uint64_t add_id_;
+
+ // Pointer to additional data. Owned by this class.
+ uint8_t* additional_;
+
+ // Length of the additional data.
+ uint64_t additional_length_;
+
+ // Duration of the frame in nanoseconds.
+ uint64_t duration_;
+
+ // Flag indicating that |duration_| has been set. Setting duration causes the
+ // frame to be written out as a Block with BlockDuration instead of as a
+ // SimpleBlock.
+ bool duration_set_;
+
+ // Pointer to the data. Owned by this class.
+ uint8_t* frame_;
+
+ // Flag telling if the data should set the key flag of a block.
+ bool is_key_;
+
+ // Length of the data.
+ uint64_t length_;
+
+ // Mkv track number the data is associated with.
+ uint64_t track_number_;
+
+ // Timestamp of the data in nanoseconds.
+ uint64_t timestamp_;
+
+ // Discard padding for the frame.
+ int64_t discard_padding_;
+
+ // Reference block timestamp.
+ int64_t reference_block_timestamp_;
+
+ // Flag indicating if |reference_block_timestamp_| has been set.
+ bool reference_block_timestamp_set_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Frame);
+};
+
+///////////////////////////////////////////////////////////////
+// Class to hold one cue point in a Cues element.
+class CuePoint {
+ public:
+ CuePoint();
+ ~CuePoint();
+
+ // Returns the size in bytes for the entire CuePoint element.
+ uint64_t Size() const;
+
+ // Output the CuePoint element to the writer. Returns true on success.
+ bool Write(IMkvWriter* writer) const;
+
+ void set_time(uint64_t time) { time_ = time; }
+ uint64_t time() const { return time_; }
+ void set_track(uint64_t track) { track_ = track; }
+ uint64_t track() const { return track_; }
+ void set_cluster_pos(uint64_t cluster_pos) { cluster_pos_ = cluster_pos; }
+ uint64_t cluster_pos() const { return cluster_pos_; }
+ void set_block_number(uint64_t block_number) { block_number_ = block_number; }
+ uint64_t block_number() const { return block_number_; }
+ void set_output_block_number(bool output_block_number) {
+ output_block_number_ = output_block_number;
+ }
+ bool output_block_number() const { return output_block_number_; }
+
+ private:
+ // Returns the size in bytes for the payload of the CuePoint element.
+ uint64_t PayloadSize() const;
+
+ // Absolute timecode according to the segment time base.
+ uint64_t time_;
+
+ // The Track element associated with the CuePoint.
+ uint64_t track_;
+
+ // The position of the Cluster containing the Block.
+ uint64_t cluster_pos_;
+
+ // Number of the Block within the Cluster, starting from 1.
+ uint64_t block_number_;
+
+ // If true the muxer will write out the block number for the cue if the
+ // block number is different than the default of 1. Default is set to true.
+ bool output_block_number_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(CuePoint);
+};
+
+///////////////////////////////////////////////////////////////
+// Cues element.
+class Cues {
+ public:
+ Cues();
+ ~Cues();
+
+ // Adds a cue point to the Cues element. Returns true on success.
+ bool AddCue(CuePoint* cue);
+
+ // Returns the cue point by index. Returns NULL if there is no cue point
+ // match.
+ CuePoint* GetCueByIndex(int32_t index) const;
+
+ // Returns the total size of the Cues element
+ uint64_t Size();
+
+ // Output the Cues element to the writer. Returns true on success.
+ bool Write(IMkvWriter* writer) const;
+
+ int32_t cue_entries_size() const { return cue_entries_size_; }
+ void set_output_block_number(bool output_block_number) {
+ output_block_number_ = output_block_number;
+ }
+ bool output_block_number() const { return output_block_number_; }
+
+ private:
+ // Number of allocated elements in |cue_entries_|.
+ int32_t cue_entries_capacity_;
+
+ // Number of CuePoints in |cue_entries_|.
+ int32_t cue_entries_size_;
+
+ // CuePoint list.
+ CuePoint** cue_entries_;
+
+ // If true the muxer will write out the block number for the cue if the
+ // block number is different than the default of 1. Default is set to true.
+ bool output_block_number_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Cues);
+};
+
+///////////////////////////////////////////////////////////////
+// ContentEncAESSettings element
+class ContentEncAESSettings {
+ public:
+ enum { kCTR = 1 };
+
+ ContentEncAESSettings();
+ ~ContentEncAESSettings() {}
+
+ // Returns the size in bytes for the ContentEncAESSettings element.
+ uint64_t Size() const;
+
+ // Writes out the ContentEncAESSettings element to |writer|. Returns true on
+ // success.
+ bool Write(IMkvWriter* writer) const;
+
+ uint64_t cipher_mode() const { return cipher_mode_; }
+
+ private:
+ // Returns the size in bytes for the payload of the ContentEncAESSettings
+ // element.
+ uint64_t PayloadSize() const;
+
+ // Sub elements
+ uint64_t cipher_mode_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(ContentEncAESSettings);
+};
+
+///////////////////////////////////////////////////////////////
+// ContentEncoding element
+// Elements used to describe if the track data has been encrypted or
+// compressed with zlib or header stripping.
+// Currently only whole frames can be encrypted with AES. This dictates that
+// ContentEncodingOrder will be 0, ContentEncodingScope will be 1,
+// ContentEncodingType will be 1, and ContentEncAlgo will be 5.
+class ContentEncoding {
+ public:
+ ContentEncoding();
+ ~ContentEncoding();
+
+ // Sets the content encryption id. Copies |length| bytes from |id| to
+ // |enc_key_id_|. Returns true on success.
+ bool SetEncryptionID(const uint8_t* id, uint64_t length);
+
+ // Returns the size in bytes for the ContentEncoding element.
+ uint64_t Size() const;
+
+ // Writes out the ContentEncoding element to |writer|. Returns true on
+ // success.
+ bool Write(IMkvWriter* writer) const;
+
+ uint64_t enc_algo() const { return enc_algo_; }
+ uint64_t encoding_order() const { return encoding_order_; }
+ uint64_t encoding_scope() const { return encoding_scope_; }
+ uint64_t encoding_type() const { return encoding_type_; }
+ ContentEncAESSettings* enc_aes_settings() { return &enc_aes_settings_; }
+
+ private:
+ // Returns the size in bytes for the encoding elements.
+  uint64_t EncodingSize(uint64_t compression_size,
+ uint64_t encryption_size) const;
+
+ // Returns the size in bytes for the encryption elements.
+ uint64_t EncryptionSize() const;
+
+  // ContentEncoding element names.
+ uint64_t enc_algo_;
+ uint8_t* enc_key_id_;
+ uint64_t encoding_order_;
+ uint64_t encoding_scope_;
+ uint64_t encoding_type_;
+
+ // ContentEncAESSettings element.
+ ContentEncAESSettings enc_aes_settings_;
+
+ // Size of the ContentEncKeyID data in bytes.
+ uint64_t enc_key_id_length_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(ContentEncoding);
+};
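+
+// Usage sketch (illustrative): attaching AES encryption metadata to a track,
+// assuming |track| came from Segment::AddTrack and |key_id|/|key_id_length|
+// (hypothetical names) identify the content encryption key:
+//   track->AddContentEncoding();
+//   mkvmuxer::ContentEncoding* const encoding =
+//       track->GetContentEncodingByIndex(0);
+//   if (encoding != NULL)
+//     encoding->SetEncryptionID(key_id, key_id_length);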
+
+///////////////////////////////////////////////////////////////
+// Colour element.
+class PrimaryChromaticity {
+ public:
+ static const float kChromaticityMin;
+ static const float kChromaticityMax;
+
+ PrimaryChromaticity(float x_val, float y_val) : x_(x_val), y_(y_val) {}
+ PrimaryChromaticity() : x_(0), y_(0) {}
+ ~PrimaryChromaticity() {}
+
+ // Returns sum of |x_id| and |y_id| element id sizes and payload sizes.
+ uint64_t PrimaryChromaticitySize(libwebm::MkvId x_id,
+ libwebm::MkvId y_id) const;
+ bool Valid() const;
+ bool Write(IMkvWriter* writer, libwebm::MkvId x_id,
+ libwebm::MkvId y_id) const;
+
+ float x() const { return x_; }
+ void set_x(float new_x) { x_ = new_x; }
+ float y() const { return y_; }
+ void set_y(float new_y) { y_ = new_y; }
+
+ private:
+ float x_;
+ float y_;
+};
+
+class MasteringMetadata {
+ public:
+ static const float kValueNotPresent;
+ static const float kMinLuminance;
+ static const float kMinLuminanceMax;
+ static const float kMaxLuminanceMax;
+
+ MasteringMetadata()
+ : luminance_max_(kValueNotPresent),
+ luminance_min_(kValueNotPresent),
+ r_(NULL),
+ g_(NULL),
+ b_(NULL),
+ white_point_(NULL) {}
+ ~MasteringMetadata() {
+ delete r_;
+ delete g_;
+ delete b_;
+ delete white_point_;
+ }
+
+ // Returns total size of the MasteringMetadata element.
+ uint64_t MasteringMetadataSize() const;
+ bool Valid() const;
+ bool Write(IMkvWriter* writer) const;
+
+ // Copies non-null chromaticity.
+ bool SetChromaticity(const PrimaryChromaticity* r,
+ const PrimaryChromaticity* g,
+ const PrimaryChromaticity* b,
+ const PrimaryChromaticity* white_point);
+ const PrimaryChromaticity* r() const { return r_; }
+ const PrimaryChromaticity* g() const { return g_; }
+ const PrimaryChromaticity* b() const { return b_; }
+ const PrimaryChromaticity* white_point() const { return white_point_; }
+
+ float luminance_max() const { return luminance_max_; }
+ void set_luminance_max(float luminance_max) {
+ luminance_max_ = luminance_max;
+ }
+ float luminance_min() const { return luminance_min_; }
+ void set_luminance_min(float luminance_min) {
+ luminance_min_ = luminance_min;
+ }
+
+ private:
+ // Returns size of MasteringMetadata child elements.
+ uint64_t PayloadSize() const;
+
+ float luminance_max_;
+ float luminance_min_;
+ PrimaryChromaticity* r_;
+ PrimaryChromaticity* g_;
+ PrimaryChromaticity* b_;
+ PrimaryChromaticity* white_point_;
+};
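+
+// Usage sketch (illustrative): filling in HDR mastering metadata, with BT.709
+// primaries and a D65 white point as example values:
+//   mkvmuxer::PrimaryChromaticity r(0.640f, 0.330f);
+//   mkvmuxer::PrimaryChromaticity g(0.300f, 0.600f);
+//   mkvmuxer::PrimaryChromaticity b(0.150f, 0.060f);
+//   mkvmuxer::PrimaryChromaticity wp(0.3127f, 0.3290f);
+//   mkvmuxer::MasteringMetadata mm;
+//   mm.SetChromaticity(&r, &g, &b, &wp);
+//   mm.set_luminance_max(1000.0f);
+//   mm.set_luminance_min(0.01f);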
+
+class Colour {
+ public:
+ enum MatrixCoefficients {
+ kGbr = 0,
+ kBt709 = 1,
+ kUnspecifiedMc = 2,
+ kReserved = 3,
+ kFcc = 4,
+ kBt470bg = 5,
+ kSmpte170MMc = 6,
+ kSmpte240MMc = 7,
+ kYcocg = 8,
+ kBt2020NonConstantLuminance = 9,
+ kBt2020ConstantLuminance = 10,
+ };
+ enum ChromaSitingHorz {
+ kUnspecifiedCsh = 0,
+ kLeftCollocated = 1,
+ kHalfCsh = 2,
+ };
+ enum ChromaSitingVert {
+ kUnspecifiedCsv = 0,
+ kTopCollocated = 1,
+ kHalfCsv = 2,
+ };
+ enum Range {
+ kUnspecifiedCr = 0,
+ kBroadcastRange = 1,
+ kFullRange = 2,
+ kMcTcDefined = 3, // Defined by MatrixCoefficients/TransferCharacteristics.
+ };
+ enum TransferCharacteristics {
+ kIturBt709Tc = 1,
+ kUnspecifiedTc = 2,
+ kReservedTc = 3,
+ kGamma22Curve = 4,
+ kGamma28Curve = 5,
+ kSmpte170MTc = 6,
+ kSmpte240MTc = 7,
+ kLinear = 8,
+ kLog = 9,
+ kLogSqrt = 10,
+ kIec6196624 = 11,
+ kIturBt1361ExtendedColourGamut = 12,
+ kIec6196621 = 13,
+ kIturBt202010bit = 14,
+ kIturBt202012bit = 15,
+ kSmpteSt2084 = 16,
+ kSmpteSt4281Tc = 17,
+ kAribStdB67Hlg = 18,
+ };
+ enum Primaries {
+ kReservedP0 = 0,
+ kIturBt709P = 1,
+ kUnspecifiedP = 2,
+ kReservedP3 = 3,
+ kIturBt470M = 4,
+ kIturBt470Bg = 5,
+ kSmpte170MP = 6,
+ kSmpte240MP = 7,
+ kFilm = 8,
+ kIturBt2020 = 9,
+ kSmpteSt4281P = 10,
+ kJedecP22Phosphors = 22,
+ };
+ static const uint64_t kValueNotPresent;
+ Colour()
+ : matrix_coefficients_(kValueNotPresent),
+ bits_per_channel_(kValueNotPresent),
+ chroma_subsampling_horz_(kValueNotPresent),
+ chroma_subsampling_vert_(kValueNotPresent),
+ cb_subsampling_horz_(kValueNotPresent),
+ cb_subsampling_vert_(kValueNotPresent),
+ chroma_siting_horz_(kValueNotPresent),
+ chroma_siting_vert_(kValueNotPresent),
+ range_(kValueNotPresent),
+ transfer_characteristics_(kValueNotPresent),
+ primaries_(kValueNotPresent),
+ max_cll_(kValueNotPresent),
+ max_fall_(kValueNotPresent),
+ mastering_metadata_(NULL) {}
+ ~Colour() { delete mastering_metadata_; }
+
+ // Returns total size of the Colour element.
+ uint64_t ColourSize() const;
+ bool Valid() const;
+ bool Write(IMkvWriter* writer) const;
+
+ // Deep copies |mastering_metadata|.
+ bool SetMasteringMetadata(const MasteringMetadata& mastering_metadata);
+
+ const MasteringMetadata* mastering_metadata() const {
+ return mastering_metadata_;
+ }
+
+ uint64_t matrix_coefficients() const { return matrix_coefficients_; }
+ void set_matrix_coefficients(uint64_t matrix_coefficients) {
+ matrix_coefficients_ = matrix_coefficients;
+ }
+ uint64_t bits_per_channel() const { return bits_per_channel_; }
+ void set_bits_per_channel(uint64_t bits_per_channel) {
+ bits_per_channel_ = bits_per_channel;
+ }
+ uint64_t chroma_subsampling_horz() const { return chroma_subsampling_horz_; }
+ void set_chroma_subsampling_horz(uint64_t chroma_subsampling_horz) {
+ chroma_subsampling_horz_ = chroma_subsampling_horz;
+ }
+ uint64_t chroma_subsampling_vert() const { return chroma_subsampling_vert_; }
+ void set_chroma_subsampling_vert(uint64_t chroma_subsampling_vert) {
+ chroma_subsampling_vert_ = chroma_subsampling_vert;
+ }
+ uint64_t cb_subsampling_horz() const { return cb_subsampling_horz_; }
+ void set_cb_subsampling_horz(uint64_t cb_subsampling_horz) {
+ cb_subsampling_horz_ = cb_subsampling_horz;
+ }
+ uint64_t cb_subsampling_vert() const { return cb_subsampling_vert_; }
+ void set_cb_subsampling_vert(uint64_t cb_subsampling_vert) {
+ cb_subsampling_vert_ = cb_subsampling_vert;
+ }
+ uint64_t chroma_siting_horz() const { return chroma_siting_horz_; }
+ void set_chroma_siting_horz(uint64_t chroma_siting_horz) {
+ chroma_siting_horz_ = chroma_siting_horz;
+ }
+ uint64_t chroma_siting_vert() const { return chroma_siting_vert_; }
+ void set_chroma_siting_vert(uint64_t chroma_siting_vert) {
+ chroma_siting_vert_ = chroma_siting_vert;
+ }
+ uint64_t range() const { return range_; }
+ void set_range(uint64_t range) { range_ = range; }
+ uint64_t transfer_characteristics() const {
+ return transfer_characteristics_;
+ }
+ void set_transfer_characteristics(uint64_t transfer_characteristics) {
+ transfer_characteristics_ = transfer_characteristics;
+ }
+ uint64_t primaries() const { return primaries_; }
+ void set_primaries(uint64_t primaries) { primaries_ = primaries; }
+ uint64_t max_cll() const { return max_cll_; }
+ void set_max_cll(uint64_t max_cll) { max_cll_ = max_cll; }
+ uint64_t max_fall() const { return max_fall_; }
+ void set_max_fall(uint64_t max_fall) { max_fall_ = max_fall; }
+
+ private:
+ // Returns size of Colour child elements.
+ uint64_t PayloadSize() const;
+
+ uint64_t matrix_coefficients_;
+ uint64_t bits_per_channel_;
+ uint64_t chroma_subsampling_horz_;
+ uint64_t chroma_subsampling_vert_;
+ uint64_t cb_subsampling_horz_;
+ uint64_t cb_subsampling_vert_;
+ uint64_t chroma_siting_horz_;
+ uint64_t chroma_siting_vert_;
+ uint64_t range_;
+ uint64_t transfer_characteristics_;
+ uint64_t primaries_;
+ uint64_t max_cll_;
+ uint64_t max_fall_;
+
+ MasteringMetadata* mastering_metadata_;
+};
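+
+// Usage sketch (illustrative): describing a BT.709, broadcast-range stream
+// and attaching the result to a video track; |mm| is the MasteringMetadata
+// from the sketch above and |video_track| a VideoTrack* (see below):
+//   mkvmuxer::Colour colour;
+//   colour.set_matrix_coefficients(mkvmuxer::Colour::kBt709);
+//   colour.set_transfer_characteristics(mkvmuxer::Colour::kIturBt709Tc);
+//   colour.set_primaries(mkvmuxer::Colour::kIturBt709P);
+//   colour.set_range(mkvmuxer::Colour::kBroadcastRange);
+//   colour.SetMasteringMetadata(mm);  // optional
+//   video_track->SetColour(colour);   // deep-copied by the track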
+
+///////////////////////////////////////////////////////////////
+// Projection element.
+class Projection {
+ public:
+ enum ProjectionType {
+ kTypeNotPresent = -1,
+ kRectangular = 0,
+ kEquirectangular = 1,
+ kCubeMap = 2,
+ kMesh = 3,
+ };
+ static const uint64_t kValueNotPresent;
+ Projection()
+ : type_(kRectangular),
+ pose_yaw_(0.0),
+ pose_pitch_(0.0),
+ pose_roll_(0.0),
+ private_data_(NULL),
+ private_data_length_(0) {}
+ ~Projection() { delete[] private_data_; }
+
+ uint64_t ProjectionSize() const;
+ bool Write(IMkvWriter* writer) const;
+
+ bool SetProjectionPrivate(const uint8_t* private_data,
+ uint64_t private_data_length);
+
+ ProjectionType type() const { return type_; }
+ void set_type(ProjectionType type) { type_ = type; }
+ float pose_yaw() const { return pose_yaw_; }
+ void set_pose_yaw(float pose_yaw) { pose_yaw_ = pose_yaw; }
+ float pose_pitch() const { return pose_pitch_; }
+ void set_pose_pitch(float pose_pitch) { pose_pitch_ = pose_pitch; }
+ float pose_roll() const { return pose_roll_; }
+ void set_pose_roll(float pose_roll) { pose_roll_ = pose_roll; }
+ uint8_t* private_data() const { return private_data_; }
+ uint64_t private_data_length() const { return private_data_length_; }
+
+ private:
+ // Returns size of VideoProjection child elements.
+ uint64_t PayloadSize() const;
+
+ ProjectionType type_;
+ float pose_yaw_;
+ float pose_pitch_;
+ float pose_roll_;
+ uint8_t* private_data_;
+ uint64_t private_data_length_;
+};
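+
+// Usage sketch (illustrative): marking a track as equirectangular 360-degree
+// video, assuming |video_track| is a VideoTrack* (see below):
+//   mkvmuxer::Projection projection;
+//   projection.set_type(mkvmuxer::Projection::kEquirectangular);
+//   video_track->SetProjection(projection);  // deep-copied by the track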
+
+///////////////////////////////////////////////////////////////
+// Track element.
+class Track {
+ public:
+ // The |seed| parameter is used to synthesize a UID for the track.
+ explicit Track(unsigned int* seed);
+ virtual ~Track();
+
+ // Adds a ContentEncoding element to the Track. Returns true on success.
+ virtual bool AddContentEncoding();
+
+ // Returns the ContentEncoding by index. Returns NULL if there is no
+ // ContentEncoding match.
+ ContentEncoding* GetContentEncodingByIndex(uint32_t index) const;
+
+ // Returns the size in bytes for the payload of the Track element.
+ virtual uint64_t PayloadSize() const;
+
+ // Returns the size in bytes of the Track element.
+ virtual uint64_t Size() const;
+
+ // Output the Track element to the writer. Returns true on success.
+ virtual bool Write(IMkvWriter* writer) const;
+
+ // Sets the CodecPrivate element of the Track element. Copies |length|
+ // bytes from |codec_private| to |codec_private_|. Returns true on success.
+ bool SetCodecPrivate(const uint8_t* codec_private, uint64_t length);
+
+ void set_codec_id(const char* codec_id);
+ const char* codec_id() const { return codec_id_; }
+ const uint8_t* codec_private() const { return codec_private_; }
+ void set_language(const char* language);
+ const char* language() const { return language_; }
+ void set_max_block_additional_id(uint64_t max_block_additional_id) {
+ max_block_additional_id_ = max_block_additional_id;
+ }
+ uint64_t max_block_additional_id() const { return max_block_additional_id_; }
+ void set_name(const char* name);
+ const char* name() const { return name_; }
+ void set_number(uint64_t number) { number_ = number; }
+ uint64_t number() const { return number_; }
+ void set_type(uint64_t type) { type_ = type; }
+ uint64_t type() const { return type_; }
+ void set_uid(uint64_t uid) { uid_ = uid; }
+ uint64_t uid() const { return uid_; }
+ void set_codec_delay(uint64_t codec_delay) { codec_delay_ = codec_delay; }
+ uint64_t codec_delay() const { return codec_delay_; }
+ void set_seek_pre_roll(uint64_t seek_pre_roll) {
+ seek_pre_roll_ = seek_pre_roll;
+ }
+ uint64_t seek_pre_roll() const { return seek_pre_roll_; }
+ void set_default_duration(uint64_t default_duration) {
+ default_duration_ = default_duration;
+ }
+ uint64_t default_duration() const { return default_duration_; }
+
+ uint64_t codec_private_length() const { return codec_private_length_; }
+ uint32_t content_encoding_entries_size() const {
+ return content_encoding_entries_size_;
+ }
+
+ private:
+ // Track element names.
+ char* codec_id_;
+ uint8_t* codec_private_;
+ char* language_;
+ uint64_t max_block_additional_id_;
+ char* name_;
+ uint64_t number_;
+ uint64_t type_;
+ uint64_t uid_;
+ uint64_t codec_delay_;
+ uint64_t seek_pre_roll_;
+ uint64_t default_duration_;
+
+ // Size of the CodecPrivate data in bytes.
+ uint64_t codec_private_length_;
+
+ // ContentEncoding element list.
+ ContentEncoding** content_encoding_entries_;
+
+ // Number of ContentEncoding elements added.
+ uint32_t content_encoding_entries_size_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Track);
+};
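+
+// Usage sketch (illustrative): configuring a generic track, assuming |track|
+// was returned by Segment::AddTrack and |headers|/|headers_size|
+// (hypothetical names) hold the codec initialization data:
+//   track->set_codec_id(mkvmuxer::Tracks::kOpusCodecId);
+//   track->set_language("eng");
+//   track->SetCodecPrivate(headers, headers_size);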
+
+///////////////////////////////////////////////////////////////
+// Track that has video specific elements.
+class VideoTrack : public Track {
+ public:
+ // Supported modes for stereo 3D.
+ enum StereoMode {
+ kMono = 0,
+ kSideBySideLeftIsFirst = 1,
+ kTopBottomRightIsFirst = 2,
+ kTopBottomLeftIsFirst = 3,
+ kSideBySideRightIsFirst = 11
+ };
+
+ enum AlphaMode { kNoAlpha = 0, kAlpha = 1 };
+
+ // The |seed| parameter is used to synthesize a UID for the track.
+ explicit VideoTrack(unsigned int* seed);
+ virtual ~VideoTrack();
+
+ // Returns the size in bytes for the payload of the Track element plus the
+ // video specific elements.
+ virtual uint64_t PayloadSize() const;
+
+ // Output the VideoTrack element to the writer. Returns true on success.
+ virtual bool Write(IMkvWriter* writer) const;
+
+ // Sets the video's stereo mode. Returns true on success.
+ bool SetStereoMode(uint64_t stereo_mode);
+
+ // Sets the video's alpha mode. Returns true on success.
+ bool SetAlphaMode(uint64_t alpha_mode);
+
+ void set_display_height(uint64_t height) { display_height_ = height; }
+ uint64_t display_height() const { return display_height_; }
+ void set_display_width(uint64_t width) { display_width_ = width; }
+ uint64_t display_width() const { return display_width_; }
+ void set_pixel_height(uint64_t height) { pixel_height_ = height; }
+ uint64_t pixel_height() const { return pixel_height_; }
+ void set_pixel_width(uint64_t width) { pixel_width_ = width; }
+ uint64_t pixel_width() const { return pixel_width_; }
+
+ void set_crop_left(uint64_t crop_left) { crop_left_ = crop_left; }
+ uint64_t crop_left() const { return crop_left_; }
+ void set_crop_right(uint64_t crop_right) { crop_right_ = crop_right; }
+ uint64_t crop_right() const { return crop_right_; }
+ void set_crop_top(uint64_t crop_top) { crop_top_ = crop_top; }
+ uint64_t crop_top() const { return crop_top_; }
+ void set_crop_bottom(uint64_t crop_bottom) { crop_bottom_ = crop_bottom; }
+ uint64_t crop_bottom() const { return crop_bottom_; }
+
+ void set_frame_rate(double frame_rate) { frame_rate_ = frame_rate; }
+ double frame_rate() const { return frame_rate_; }
+ void set_height(uint64_t height) { height_ = height; }
+ uint64_t height() const { return height_; }
+ uint64_t stereo_mode() { return stereo_mode_; }
+ uint64_t alpha_mode() { return alpha_mode_; }
+ void set_width(uint64_t width) { width_ = width; }
+ uint64_t width() const { return width_; }
+
+ Colour* colour() { return colour_; }
+
+ // Deep copies |colour|.
+ bool SetColour(const Colour& colour);
+
+ Projection* projection() { return projection_; }
+
+ // Deep copies |projection|.
+ bool SetProjection(const Projection& projection);
+
+ private:
+ // Returns the size in bytes of the Video element.
+ uint64_t VideoPayloadSize() const;
+
+ // Video track element names.
+ uint64_t display_height_;
+ uint64_t display_width_;
+ uint64_t pixel_height_;
+ uint64_t pixel_width_;
+ uint64_t crop_left_;
+ uint64_t crop_right_;
+ uint64_t crop_top_;
+ uint64_t crop_bottom_;
+ double frame_rate_;
+ uint64_t height_;
+ uint64_t stereo_mode_;
+ uint64_t alpha_mode_;
+ uint64_t width_;
+
+ Colour* colour_;
+ Projection* projection_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(VideoTrack);
+};
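+
+// Usage sketch (illustrative): adding and configuring a video track through
+// an initialized Segment named |segment|:
+//   const uint64_t vid = segment.AddVideoTrack(1920, 1080, 0);
+//   mkvmuxer::VideoTrack* const video_track =
+//       static_cast<mkvmuxer::VideoTrack*>(segment.GetTrackByNumber(vid));
+//   video_track->set_display_width(1920);
+//   video_track->set_display_height(1080);
+//   video_track->SetStereoMode(mkvmuxer::VideoTrack::kMono);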
+
+///////////////////////////////////////////////////////////////
+// Track that has audio specific elements.
+class AudioTrack : public Track {
+ public:
+ // The |seed| parameter is used to synthesize a UID for the track.
+ explicit AudioTrack(unsigned int* seed);
+ virtual ~AudioTrack();
+
+ // Returns the size in bytes for the payload of the Track element plus the
+ // audio specific elements.
+ virtual uint64_t PayloadSize() const;
+
+ // Output the AudioTrack element to the writer. Returns true on success.
+ virtual bool Write(IMkvWriter* writer) const;
+
+ void set_bit_depth(uint64_t bit_depth) { bit_depth_ = bit_depth; }
+ uint64_t bit_depth() const { return bit_depth_; }
+ void set_channels(uint64_t channels) { channels_ = channels; }
+ uint64_t channels() const { return channels_; }
+ void set_sample_rate(double sample_rate) { sample_rate_ = sample_rate; }
+ double sample_rate() const { return sample_rate_; }
+
+ private:
+ // Audio track element names.
+ uint64_t bit_depth_;
+ uint64_t channels_;
+ double sample_rate_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(AudioTrack);
+};
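+
+// Usage sketch (illustrative): adding a 48 kHz stereo audio track through an
+// initialized Segment named |segment|:
+//   const uint64_t aud = segment.AddAudioTrack(48000, 2, 0);
+//   mkvmuxer::AudioTrack* const audio_track =
+//       static_cast<mkvmuxer::AudioTrack*>(segment.GetTrackByNumber(aud));
+//   audio_track->set_bit_depth(16);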
+
+///////////////////////////////////////////////////////////////
+// Tracks element
+class Tracks {
+ public:
+  // Audio and video types defined by the Matroska specs.
+ enum { kVideo = 0x1, kAudio = 0x2 };
+
+ static const char kOpusCodecId[];
+ static const char kVorbisCodecId[];
+ static const char kVp8CodecId[];
+ static const char kVp9CodecId[];
+ static const char kVp10CodecId[];
+ static const char kAV1CodecId[];
+ static const char kWebVttCaptionsId[];
+ static const char kWebVttDescriptionsId[];
+ static const char kWebVttMetadataId[];
+ static const char kWebVttSubtitlesId[];
+
+ Tracks();
+ ~Tracks();
+
+ // Adds a Track element to the Tracks object. |track| will be owned and
+ // deleted by the Tracks object. Returns true on success. |number| is the
+ // number to use for the track. |number| must be >= 0. If |number| == 0
+ // then the muxer will decide on the track number.
+ bool AddTrack(Track* track, int32_t number);
+
+ // Returns the track by index. Returns NULL if there is no track match.
+ const Track* GetTrackByIndex(uint32_t idx) const;
+
+  // Search the Tracks and return the track that matches |track_number|.
+  // Returns NULL if there is no track match.
+ Track* GetTrackByNumber(uint64_t track_number) const;
+
+ // Returns true if the track number is an audio track.
+ bool TrackIsAudio(uint64_t track_number) const;
+
+ // Returns true if the track number is a video track.
+ bool TrackIsVideo(uint64_t track_number) const;
+
+ // Output the Tracks element to the writer. Returns true on success.
+ bool Write(IMkvWriter* writer) const;
+
+ uint32_t track_entries_size() const { return track_entries_size_; }
+
+ private:
+ // Track element list.
+ Track** track_entries_;
+
+ // Number of Track elements added.
+ uint32_t track_entries_size_;
+
+  // Whether or not the Tracks element has already been written via IMkvWriter.
+ mutable bool wrote_tracks_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Tracks);
+};
+
+///////////////////////////////////////////////////////////////
+// Chapter element
+//
+class Chapter {
+ public:
+ // Set the identifier for this chapter. (This corresponds to the
+ // Cue Identifier line in WebVTT.)
+ // TODO(matthewjheaney): the actual serialization of this item in
+ // MKV is pending.
+ bool set_id(const char* id);
+
+ // Converts the nanosecond start and stop times of this chapter to
+ // their corresponding timecode values, and stores them that way.
+ void set_time(const Segment& segment, uint64_t start_time_ns,
+ uint64_t end_time_ns);
+
+ // Sets the uid for this chapter. Primarily used to enable
+ // deterministic output from the muxer.
+ void set_uid(const uint64_t uid) { uid_ = uid; }
+
+ // Add a title string to this chapter, per the semantics described
+ // here:
+ // http://www.matroska.org/technical/specs/index.html
+ //
+ // The title ("chapter string") is a UTF-8 string.
+ //
+ // The language has ISO 639-2 representation, described here:
+ // http://www.loc.gov/standards/iso639-2/englangn.html
+ // http://www.loc.gov/standards/iso639-2/php/English_list.php
+ // If you specify NULL as the language value, this implies
+ // English ("eng").
+ //
+ // The country value corresponds to the codes listed here:
+ // http://www.iana.org/domains/root/db/
+ //
+ // The function returns false if the string could not be allocated.
+ bool add_string(const char* title, const char* language, const char* country);
+
+ private:
+ friend class Chapters;
+
+ // For storage of chapter titles that differ by language.
+ class Display {
+ public:
+ // Establish representation invariant for new Display object.
+ void Init();
+
+ // Reclaim resources, in anticipation of destruction.
+ void Clear();
+
+ // Copies the title to the |title_| member. Returns false on
+ // error.
+ bool set_title(const char* title);
+
+ // Copies the language to the |language_| member. Returns false
+ // on error.
+ bool set_language(const char* language);
+
+ // Copies the country to the |country_| member. Returns false on
+ // error.
+ bool set_country(const char* country);
+
+ // If |writer| is non-NULL, serialize the Display sub-element of
+ // the Atom into the stream. Returns the Display element size on
+ // success, 0 if error.
+ uint64_t WriteDisplay(IMkvWriter* writer) const;
+
+ private:
+ char* title_;
+ char* language_;
+ char* country_;
+ };
+
+ Chapter();
+ ~Chapter();
+
+ // Establish the representation invariant for a newly-created
+ // Chapter object. The |seed| parameter is used to create the UID
+ // for this chapter atom.
+ void Init(unsigned int* seed);
+
+ // Copies this Chapter object to a different one. This is used when
+ // expanding a plain array of Chapter objects (see Chapters).
+ void ShallowCopy(Chapter* dst) const;
+
+ // Reclaim resources used by this Chapter object, pending its
+ // destruction.
+ void Clear();
+
+ // If there is no storage remaining on the |displays_| array for a
+ // new display object, creates a new, longer array and copies the
+ // existing Display objects to the new array. Returns false if the
+ // array cannot be expanded.
+ bool ExpandDisplaysArray();
+
+ // If |writer| is non-NULL, serialize the Atom sub-element into the
+ // stream. Returns the total size of the element on success, 0 if
+ // error.
+ uint64_t WriteAtom(IMkvWriter* writer) const;
+
+ // The string identifier for this chapter (corresponds to WebVTT cue
+ // identifier).
+ char* id_;
+
+ // Start timecode of the chapter.
+ uint64_t start_timecode_;
+
+ // Stop timecode of the chapter.
+ uint64_t end_timecode_;
+
+ // The binary identifier for this chapter.
+ uint64_t uid_;
+
+ // The Atom element can contain multiple Display sub-elements, as
+ // the same logical title can be rendered in different languages.
+ Display* displays_;
+
+ // The physical length (total size) of the |displays_| array.
+ int displays_size_;
+
+ // The logical length (number of active elements) on the |displays_|
+ // array.
+ int displays_count_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Chapter);
+};
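+
+// Usage sketch (illustrative): chapters are created through the owning
+// Segment, since the Chapter constructor is private. A chapter covering the
+// first five seconds:
+//   mkvmuxer::Chapter* const chapter = segment.AddChapter();
+//   chapter->set_id("intro");
+//   chapter->set_time(segment, 0, 5000000000ULL);  // start/end in ns
+//   chapter->add_string("Introduction", "eng", "us");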
+
+///////////////////////////////////////////////////////////////
+// Chapters element
+//
+class Chapters {
+ public:
+ Chapters();
+ ~Chapters();
+
+ Chapter* AddChapter(unsigned int* seed);
+
+ // Returns the number of chapters that have been added.
+ int Count() const;
+
+ // Output the Chapters element to the writer. Returns true on success.
+ bool Write(IMkvWriter* writer) const;
+
+ private:
+ // Expands the chapters_ array if there is not enough space to contain
+ // another chapter object. Returns true on success.
+ bool ExpandChaptersArray();
+
+ // If |writer| is non-NULL, serialize the Edition sub-element of the
+ // Chapters element into the stream. Returns the Edition element
+ // size on success, 0 if error.
+ uint64_t WriteEdition(IMkvWriter* writer) const;
+
+ // Total length of the chapters_ array.
+ int chapters_size_;
+
+ // Number of active chapters on the chapters_ array.
+ int chapters_count_;
+
+ // Array for storage of chapter objects.
+ Chapter* chapters_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Chapters);
+};
+
+///////////////////////////////////////////////////////////////
+// Tag element
+//
+class Tag {
+ public:
+ bool add_simple_tag(const char* tag_name, const char* tag_string);
+
+ private:
+ // Tags calls Clear and the destructor of Tag
+ friend class Tags;
+
+ // For storage of simple tags
+ class SimpleTag {
+ public:
+ // Establish representation invariant for new SimpleTag object.
+ void Init();
+
+ // Reclaim resources, in anticipation of destruction.
+ void Clear();
+
+    // Copies the tag name to the |tag_name_| member. Returns false on
+    // error.
+ bool set_tag_name(const char* tag_name);
+
+    // Copies the tag string to the |tag_string_| member. Returns false
+    // on error.
+ bool set_tag_string(const char* tag_string);
+
+    // If |writer| is non-NULL, serialize the SimpleTag sub-element of
+    // the Tag into the stream. Returns the SimpleTag element size on
+ // success, 0 if error.
+ uint64_t Write(IMkvWriter* writer) const;
+
+ private:
+ char* tag_name_;
+ char* tag_string_;
+ };
+
+ Tag();
+ ~Tag();
+
+ // Copies this Tag object to a different one. This is used when
+ // expanding a plain array of Tag objects (see Tags).
+ void ShallowCopy(Tag* dst) const;
+
+ // Reclaim resources used by this Tag object, pending its
+ // destruction.
+ void Clear();
+
+ // If there is no storage remaining on the |simple_tags_| array for a
+  // new SimpleTag object, creates a new, longer array and copies the
+ // existing SimpleTag objects to the new array. Returns false if the
+ // array cannot be expanded.
+ bool ExpandSimpleTagsArray();
+
+ // If |writer| is non-NULL, serialize the Tag sub-element into the
+ // stream. Returns the total size of the element on success, 0 if
+ // error.
+ uint64_t Write(IMkvWriter* writer) const;
+
+  // The Tag element can contain multiple SimpleTag sub-elements.
+ SimpleTag* simple_tags_;
+
+ // The physical length (total size) of the |simple_tags_| array.
+ int simple_tags_size_;
+
+ // The logical length (number of active elements) on the |simple_tags_|
+ // array.
+ int simple_tags_count_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Tag);
+};
+
+///////////////////////////////////////////////////////////////
+// Tags element
+//
+class Tags {
+ public:
+ Tags();
+ ~Tags();
+
+ Tag* AddTag();
+
+ // Returns the number of tags that have been added.
+ int Count() const;
+
+ // Output the Tags element to the writer. Returns true on success.
+ bool Write(IMkvWriter* writer) const;
+
+ private:
+ // Expands the tags_ array if there is not enough space to contain
+ // another tag object. Returns true on success.
+ bool ExpandTagsArray();
+
+ // Total length of the tags_ array.
+ int tags_size_;
+
+ // Number of active tags on the tags_ array.
+ int tags_count_;
+
+ // Array for storage of tag objects.
+ Tag* tags_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Tags);
+};
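+
+// Usage sketch (illustrative): like chapters, tags are created through the
+// owning Segment:
+//   mkvmuxer::Tag* const tag = segment.AddTag();
+//   if (tag != NULL)
+//     tag->add_simple_tag("TITLE", "An example title");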
+
+///////////////////////////////////////////////////////////////
+// Cluster element
+//
+// Notes:
+// |Init| must be called before any other method in this class.
+class Cluster {
+ public:
+ // |timecode| is the absolute timecode of the cluster. |cues_pos| is the
+ // position for the cluster within the segment that should be written in
+ // the cues element. |timecode_scale| is the timecode scale of the segment.
+ Cluster(uint64_t timecode, int64_t cues_pos, uint64_t timecode_scale,
+ bool write_last_frame_with_duration = false,
+ bool fixed_size_timecode = false);
+ ~Cluster();
+
+ bool Init(IMkvWriter* ptr_writer);
+
+ // Adds a frame to be output in the file. The frame is written out through
+ // |writer_| if successful. Returns true on success.
+ bool AddFrame(const Frame* frame);
+
+ // Adds a frame to be output in the file. The frame is written out through
+ // |writer_| if successful. Returns true on success.
+ // Inputs:
+ // data: Pointer to the data
+ // length: Length of the data
+ // track_number: Track to add the data to. Value returned by Add track
+ // functions. The range of allowed values is [1, 126].
+ // timecode: Absolute (not relative to cluster) timestamp of the
+ // frame, expressed in timecode units.
+ // is_key: Flag telling whether or not this frame is a key frame.
+ bool AddFrame(const uint8_t* data, uint64_t length, uint64_t track_number,
+ uint64_t timecode, // timecode units (absolute)
+ bool is_key);
+
+ // Adds a frame to be output in the file. The frame is written out through
+ // |writer_| if successful. Returns true on success.
+ // Inputs:
+ // data: Pointer to the data
+ // length: Length of the data
+ // additional: Pointer to the additional data
+ // additional_length: Length of the additional data
+ // add_id: Value of BlockAddID element
+ // track_number: Track to add the data to. Value returned by Add track
+ // functions. The range of allowed values is [1, 126].
+ // abs_timecode: Absolute (not relative to cluster) timestamp of the
+ // frame, expressed in timecode units.
+ // is_key: Flag telling whether or not this frame is a key frame.
+ bool AddFrameWithAdditional(const uint8_t* data, uint64_t length,
+ const uint8_t* additional,
+ uint64_t additional_length, uint64_t add_id,
+ uint64_t track_number, uint64_t abs_timecode,
+ bool is_key);
+
+ // Adds a frame to be output in the file. The frame is written out through
+ // |writer_| if successful. Returns true on success.
+ // Inputs:
+ // data: Pointer to the data.
+ // length: Length of the data.
+ // discard_padding: DiscardPadding element value.
+ // track_number: Track to add the data to. Value returned by Add track
+ // functions. The range of allowed values is [1, 126].
+ // abs_timecode: Absolute (not relative to cluster) timestamp of the
+ // frame, expressed in timecode units.
+ // is_key: Flag telling whether or not this frame is a key frame.
+ bool AddFrameWithDiscardPadding(const uint8_t* data, uint64_t length,
+ int64_t discard_padding,
+ uint64_t track_number, uint64_t abs_timecode,
+ bool is_key);
+
+ // Writes a frame of metadata to the output medium; returns true on
+ // success.
+ // Inputs:
+ // data: Pointer to the data
+ // length: Length of the data
+ // track_number: Track to add the data to. Value returned by Add track
+ // functions. The range of allowed values is [1, 126].
+ // timecode: Absolute (not relative to cluster) timestamp of the
+ // metadata frame, expressed in timecode units.
+ // duration: Duration of metadata frame, in timecode units.
+ //
+ // The metadata frame is written as a block group, with a duration
+ // sub-element but no reference time sub-elements (indicating that
+ // it is considered a keyframe, per Matroska semantics).
+ bool AddMetadata(const uint8_t* data, uint64_t length, uint64_t track_number,
+ uint64_t timecode, uint64_t duration);
+
+ // Increments the size of the cluster's data in bytes.
+ void AddPayloadSize(uint64_t size);
+
+ // Closes the cluster so no more data can be written to it. Will update the
+ // cluster's size if |writer_| is seekable. Returns true on success. This
+ // variant of Finalize() fails when |write_last_frame_with_duration_| is set
+ // to true.
+ bool Finalize();
+
+ // Closes the cluster so no more data can be written to it. Will update the
+ // cluster's size if |writer_| is seekable. Returns true on success.
+ // Inputs:
+ // set_last_frame_duration: Boolean indicating whether or not the duration
+ // of the last frame should be set. If set to
+ // false, the |duration| value is ignored and
+ // |write_last_frame_with_duration_| will not be
+ // honored.
+ // duration: Duration of the Cluster in timecode scale.
+ bool Finalize(bool set_last_frame_duration, uint64_t duration);
+
+ // Returns the size in bytes for the entire Cluster element.
+ uint64_t Size() const;
+
+ // Given |abs_timecode|, calculates timecode relative to most recent timecode.
+ // Returns -1 on failure, or a relative timecode.
+ int64_t GetRelativeTimecode(int64_t abs_timecode) const;
+
+ int64_t size_position() const { return size_position_; }
+ int32_t blocks_added() const { return blocks_added_; }
+ uint64_t payload_size() const { return payload_size_; }
+ int64_t position_for_cues() const { return position_for_cues_; }
+ uint64_t timecode() const { return timecode_; }
+ uint64_t timecode_scale() const { return timecode_scale_; }
+ void set_write_last_frame_with_duration(bool write_last_frame_with_duration) {
+ write_last_frame_with_duration_ = write_last_frame_with_duration;
+ }
+ bool write_last_frame_with_duration() const {
+ return write_last_frame_with_duration_;
+ }
+
+ private:
+ // Iterator type for the |stored_frames_| map.
+ typedef std::map<uint64_t, std::list<Frame*> >::iterator FrameMapIterator;
+
+ // Utility method that confirms that blocks can still be added, and that the
+ // cluster header has been written. Used by |DoWriteFrame*|. Returns true
+ // when successful.
+ bool PreWriteBlock();
+
+ // Utility method used by the |DoWriteFrame*| methods that handles the book
+ // keeping required after each block is written.
+ void PostWriteBlock(uint64_t element_size);
+
+ // Does some verification and calls WriteFrame.
+ bool DoWriteFrame(const Frame* const frame);
+
+ // Either holds back the given frame, or writes it out depending on whether or
+ // not |write_last_frame_with_duration_| is set.
+ bool QueueOrWriteFrame(const Frame* const frame);
+
+ // Outputs the Cluster header to |writer_|. Returns true on success.
+ bool WriteClusterHeader();
+
+ // Number of blocks added to the cluster.
+ int32_t blocks_added_;
+
+ // Flag telling if the cluster has been closed.
+ bool finalized_;
+
+ // Flag indicating whether the cluster's timecode will always be written out
+ // using 8 bytes.
+ bool fixed_size_timecode_;
+
+ // Flag telling if the cluster's header has been written.
+ bool header_written_;
+
+ // The size of the cluster elements in bytes.
+ uint64_t payload_size_;
+
+ // The file position used for cue points.
+ const int64_t position_for_cues_;
+
+ // The file position of the cluster's size element.
+ int64_t size_position_;
+
+ // The absolute timecode of the cluster.
+ const uint64_t timecode_;
+
+ // The timecode scale of the Segment containing the cluster.
+ const uint64_t timecode_scale_;
+
+ // Flag indicating whether the last frame of the cluster should be written as
+ // a Block with Duration. If set to true, then it will result in holding back
+ // of frames and the parameterized version of Finalize() must be called to
+ // finish writing the Cluster.
+ bool write_last_frame_with_duration_;
+
+ // Map used to hold back frames, if required. Track number is the key.
+ std::map<uint64_t, std::list<Frame*> > stored_frames_;
+
+ // Map from track number to the timestamp of the last block written for that
+ // track.
+ std::map<uint64_t, uint64_t> last_block_timestamp_;
+
+ // Pointer to the writer object. Not owned by this class.
+ IMkvWriter* writer_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Cluster);
+};
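+
+// Usage sketch (illustrative): Cluster objects are created and finalized
+// internally by Segment; applications typically only tune the clustering
+// policy:
+//   segment.set_max_cluster_duration(5000000000ULL);  // 5 s, in ns
+//   segment.AccurateClusterDuration(true);
+//   segment.ForceNewClusterOnNextFrame();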
+
+///////////////////////////////////////////////////////////////
+// SeekHead element
+class SeekHead {
+ public:
+ SeekHead();
+ ~SeekHead();
+
+ // TODO(fgalligan): Change this to reserve a certain size. Then check how
+ // big the seek entry to be added is as not every seek entry will be the
+ // maximum size it could be.
+ // Adds a seek entry to be written out when the element is finalized. |id|
+ // must be the coded mkv element id. |pos| is the file position of the
+ // element. Returns true on success.
+ bool AddSeekEntry(uint32_t id, uint64_t pos);
+
+ // Writes out SeekHead and SeekEntry elements. Returns true on success.
+ bool Finalize(IMkvWriter* writer) const;
+
+ // Returns the id of the Seek Entry at the given index. Returns -1 if index is
+ // out of range.
+ uint32_t GetId(int index) const;
+
+ // Returns the position of the Seek Entry at the given index. Returns -1 if
+ // index is out of range.
+ uint64_t GetPosition(int index) const;
+
+ // Sets the Seek Entry id and position at given index.
+ // Returns true on success.
+ bool SetSeekEntry(int index, uint32_t id, uint64_t position);
+
+ // Reserves space by writing out a Void element which will be updated with
+ // a SeekHead element later. Returns true on success.
+ bool Write(IMkvWriter* writer);
+
+  // Maximum number of Seek Entries that will be written.
+ const static int32_t kSeekEntryCount = 5;
+
+ private:
+ // Returns the maximum size in bytes of one seek entry.
+ uint64_t MaxEntrySize() const;
+
+ // Seek entry id element list.
+ uint32_t seek_entry_id_[kSeekEntryCount];
+
+ // Seek entry pos element list.
+ uint64_t seek_entry_pos_[kSeekEntryCount];
+
+ // The file position of SeekHead element.
+ int64_t start_pos_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(SeekHead);
+};
+
+///////////////////////////////////////////////////////////////
+// Segment Information element
+class SegmentInfo {
+ public:
+ SegmentInfo();
+ ~SegmentInfo();
+
+ // Will update the duration if |duration_| is > 0.0. Returns true on success.
+ bool Finalize(IMkvWriter* writer) const;
+
+ // Sets |muxing_app_| and |writing_app_|.
+ bool Init();
+
+ // Output the Segment Information element to the writer. Returns true on
+ // success.
+ bool Write(IMkvWriter* writer);
+
+ void set_duration(double duration) { duration_ = duration; }
+ double duration() const { return duration_; }
+ void set_muxing_app(const char* app);
+ const char* muxing_app() const { return muxing_app_; }
+ void set_timecode_scale(uint64_t scale) { timecode_scale_ = scale; }
+ uint64_t timecode_scale() const { return timecode_scale_; }
+ void set_writing_app(const char* app);
+ const char* writing_app() const { return writing_app_; }
+ void set_date_utc(int64_t date_utc) { date_utc_ = date_utc; }
+ int64_t date_utc() const { return date_utc_; }
+
+ private:
+ // Segment Information element names.
+ // Initially set to -1 to signify that a duration has not been set and should
+ // not be written out.
+ double duration_;
+ // Set to libwebm-%d.%d.%d.%d, major, minor, build, revision.
+ char* muxing_app_;
+ uint64_t timecode_scale_;
+ // Initially set to libwebm-%d.%d.%d.%d, major, minor, build, revision.
+ char* writing_app_;
+ // LLONG_MIN when DateUTC is not set.
+ int64_t date_utc_;
+
+ // The file position of the duration element.
+ int64_t duration_pos_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(SegmentInfo);
+};
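+
+// Usage sketch (illustrative): stamping writer metadata on an initialized
+// Segment; "my_encoder" is a hypothetical application name:
+//   mkvmuxer::SegmentInfo* const info = segment.GetSegmentInfo();
+//   info->set_timecode_scale(1000000);  // 1 ms per timecode unit
+//   info->set_writing_app("my_encoder");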
+
+///////////////////////////////////////////////////////////////
+// This class represents the main segment in a WebM file. Currently only
+// supports one Segment element.
+//
+// Notes:
+// |Init| must be called before any other method in this class.
+class Segment {
+ public:
+ enum Mode { kLive = 0x1, kFile = 0x2 };
+
+ enum CuesPosition {
+ kAfterClusters = 0x0, // Position Cues after Clusters - Default
+ kBeforeClusters = 0x1 // Position Cues before Clusters
+ };
+
+ static const uint32_t kDefaultDocTypeVersion = 4;
+ static const uint64_t kDefaultMaxClusterDuration = 30000000000ULL;
+
+ Segment();
+ ~Segment();
+
+ // Initializes |SegmentInfo| and returns result. Always returns false when
+ // |ptr_writer| is NULL.
+ bool Init(IMkvWriter* ptr_writer);
+
+ // Adds a generic track to the segment. Returns the newly-allocated
+ // track object (which is owned by the segment) on success, NULL on
+ // error. |number| is the number to use for the track. |number|
+ // must be >= 0. If |number| == 0 then the muxer will decide on the
+ // track number.
+ Track* AddTrack(int32_t number);
+
+ // Adds a Vorbis audio track to the segment. Returns the number of the track
+ // on success, 0 on error. |number| is the number to use for the audio track.
+ // |number| must be >= 0. If |number| == 0 then the muxer will decide on
+ // the track number.
+ uint64_t AddAudioTrack(int32_t sample_rate, int32_t channels, int32_t number);
+
+ // Adds an empty chapter to the chapters of this segment. Returns
+ // non-NULL on success. After adding the chapter, the caller should
+ // populate its fields via the Chapter member functions.
+ Chapter* AddChapter();
+
+ // Adds an empty tag to the tags of this segment. Returns
+ // non-NULL on success. After adding the tag, the caller should
+ // populate its fields via the Tag member functions.
+ Tag* AddTag();
+
+ // Adds a cue point to the Cues element. |timestamp| is the time in
+ // nanoseconds of the cue's time. |track| is the Track of the Cue. This
+ // function must be called after AddFrame to calculate the correct
+ // BlockNumber for the CuePoint. Returns true on success.
+ bool AddCuePoint(uint64_t timestamp, uint64_t track);
+
+ // Adds a frame to be output in the file. Returns true on success.
+ // Inputs:
+ // data: Pointer to the data
+ // length: Length of the data
+ // track_number: Track to add the data to. Value returned by Add track
+ // functions.
+ // timestamp: Timestamp of the frame in nanoseconds from 0.
+ // is_key: Flag telling whether or not this frame is a key frame.
+ bool AddFrame(const uint8_t* data, uint64_t length, uint64_t track_number,
+ uint64_t timestamp_ns, bool is_key);
+
+ // Writes a frame of metadata to the output medium; returns true on
+ // success.
+ // Inputs:
+ // data: Pointer to the data
+ // length: Length of the data
+ // track_number: Track to add the data to. Value returned by Add track
+ // functions.
+ // timecode: Absolute timestamp of the metadata frame, expressed
+ // in nanosecond units.
+ // duration: Duration of metadata frame, in nanosecond units.
+ //
+ // The metadata frame is written as a block group, with a duration
+ // sub-element but no reference time sub-elements (indicating that
+ // it is considered a keyframe, per Matroska semantics).
+ bool AddMetadata(const uint8_t* data, uint64_t length, uint64_t track_number,
+ uint64_t timestamp_ns, uint64_t duration_ns);
+
+ // Writes a frame with additional data to the output medium; returns true on
+ // success.
+ // Inputs:
+ // data: Pointer to the data.
+ // length: Length of the data.
+ // additional: Pointer to additional data.
+ // additional_length: Length of additional data.
+ // add_id: Additional ID which identifies the type of additional data.
+ // track_number: Track to add the data to. Value returned by Add track
+ // functions.
+ // timestamp: Absolute timestamp of the frame, expressed in nanosecond
+ // units.
+ // is_key: Flag telling whether or not this frame is a key frame.
+ bool AddFrameWithAdditional(const uint8_t* data, uint64_t length,
+ const uint8_t* additional,
+ uint64_t additional_length, uint64_t add_id,
+ uint64_t track_number, uint64_t timestamp,
+ bool is_key);
+
+ // Writes a frame with DiscardPadding to the output medium; returns true on
+ // success.
+ // Inputs:
+ // data: Pointer to the data.
+ // length: Length of the data.
+ // discard_padding: DiscardPadding element value.
+ // track_number: Track to add the data to. Value returned by Add track
+ // functions.
+ // timestamp: Absolute timestamp of the frame, expressed in nanosecond
+ // units.
+ // is_key: Flag telling whether or not this frame is a key frame.
+ bool AddFrameWithDiscardPadding(const uint8_t* data, uint64_t length,
+ int64_t discard_padding,
+ uint64_t track_number, uint64_t timestamp,
+ bool is_key);
+
+ // Writes a Frame to the output medium. Chooses the correct way of writing
+ // the frame (Block vs SimpleBlock) based on the parameters passed.
+ // Inputs:
+ // frame: frame object
+ bool AddGenericFrame(const Frame* frame);
+
+ // Adds a VP8 video track to the segment. Returns the number of the track on
+ // success, 0 on error. |number| is the number to use for the video track.
+ // |number| must be >= 0. If |number| == 0 then the muxer will decide on
+ // the track number.
+ uint64_t AddVideoTrack(int32_t width, int32_t height, int32_t number);
+
+ // This function must be called after Finalize() if you need a copy of the
+ // output with Cues written before the Clusters. It will return false if the
+  // writer is not seekable or if chunking is set to true.
+ // Input parameters:
+ // reader - an IMkvReader object created with the same underlying file of the
+ // current writer object. Make sure to close the existing writer
+ // object before creating this so that all the data is properly
+ // flushed and available for reading.
+ // writer - an IMkvWriter object pointing to a *different* file than the one
+ // pointed by the current writer object. This file will contain the
+ // Cues element before the Clusters.
+ bool CopyAndMoveCuesBeforeClusters(mkvparser::IMkvReader* reader,
+ IMkvWriter* writer);
+
+ // Sets which track to use for the Cues element. Must have added the track
+ // before calling this function. Returns true on success. |track_number| is
+ // returned by the Add track functions.
+ bool CuesTrack(uint64_t track_number);
+
+ // This will force the muxer to create a new Cluster when the next frame is
+ // added.
+ void ForceNewClusterOnNextFrame();
+
+ // Writes out any frames that have not been written out. Finalizes the last
+ // cluster. May update the size and duration of the segment. May output the
+ // Cues element. May finalize the SeekHead element. Returns true on success.
+ bool Finalize();
+
+ // Returns the Cues object.
+ Cues* GetCues() { return &cues_; }
+
+ // Returns the Segment Information object.
+ const SegmentInfo* GetSegmentInfo() const { return &segment_info_; }
+ SegmentInfo* GetSegmentInfo() { return &segment_info_; }
+
+ // Search the Tracks and return the track that matches |track_number|.
+ // Returns NULL if there is no track match.
+ Track* GetTrackByNumber(uint64_t track_number) const;
+
+ // Toggles whether to output a cues element.
+ void OutputCues(bool output_cues);
+
+ // Toggles whether to write the last frame in each Cluster with Duration.
+ void AccurateClusterDuration(bool accurate_cluster_duration);
+
+ // Toggles whether to write the Cluster Timecode using exactly 8 bytes.
+ void UseFixedSizeClusterTimecode(bool fixed_size_cluster_timecode);
+
+ // Sets if the muxer will output files in chunks or not. |chunking| is a
+ // flag telling whether or not to turn on chunking. |filename| is the base
+ // filename for the chunk files. The header chunk file will be named
+ // |filename|.hdr and the data chunks will be named
+ // |filename|_XXXXXX.chk. Chunking implies that the muxer will be writing
+ // to files so the muxer will use the default MkvWriter class to control
+ // what data is written to what files. Returns true on success.
+ // TODO: Should we change the IMkvWriter Interface to add Open and Close?
+ // That will force the interface to be dependent on files.
+ bool SetChunking(bool chunking, const char* filename);
+
+ bool chunking() const { return chunking_; }
+ uint64_t cues_track() const { return cues_track_; }
+ void set_max_cluster_duration(uint64_t max_cluster_duration) {
+ max_cluster_duration_ = max_cluster_duration;
+ }
+ uint64_t max_cluster_duration() const { return max_cluster_duration_; }
+ void set_max_cluster_size(uint64_t max_cluster_size) {
+ max_cluster_size_ = max_cluster_size;
+ }
+ uint64_t max_cluster_size() const { return max_cluster_size_; }
+ void set_mode(Mode mode) { mode_ = mode; }
+ Mode mode() const { return mode_; }
+ CuesPosition cues_position() const { return cues_position_; }
+ bool output_cues() const { return output_cues_; }
+ void set_estimate_file_duration(bool estimate_duration) {
+ estimate_file_duration_ = estimate_duration;
+ }
+ bool estimate_file_duration() const { return estimate_file_duration_; }
+ const SegmentInfo* segment_info() const { return &segment_info_; }
+ void set_duration(double duration) { duration_ = duration; }
+ double duration() const { return duration_; }
+
+ // Returns true when codec IDs are valid for WebM.
+ bool DocTypeIsWebm() const;
+
+ private:
+  // Checks if header information has been output and initialized. If not, it
+  // will output the Segment element and initialize the SeekHead and Cues
+  // elements.
+ bool CheckHeaderInfo();
+
+ // Sets |doc_type_version_| based on the current element requirements.
+ void UpdateDocTypeVersion();
+
+ // Sets |name| according to how many chunks have been written. |ext| is the
+ // file extension. |name| must be deleted by the calling app. Returns true
+ // on success.
+ bool UpdateChunkName(const char* ext, char** name) const;
+
+ // Returns the maximum offset within the segment's payload. When chunking
+ // this function is needed to determine offsets of elements within the
+ // chunked files. Returns -1 on error.
+ int64_t MaxOffset();
+
+ // Adds the frame to our frame array.
+ bool QueueFrame(Frame* frame);
+
+ // Output all frames that are queued. Returns -1 on error, otherwise
+ // it returns the number of frames written.
+ int WriteFramesAll();
+
+  // Output all frames that are queued and have an end time less than
+  // |timestamp|. Returns true on success, including when there are no frames
+  // queued.
+ bool WriteFramesLessThan(uint64_t timestamp);
+
+ // Outputs the segment header, Segment Information element, SeekHead element,
+ // and Tracks element to |writer_|.
+ bool WriteSegmentHeader();
+
+ // Given a frame with the specified timestamp (nanosecond units) and
+ // keyframe status, determine whether a new cluster should be
+ // created, before writing enqueued frames and the frame itself. The
+ // function returns one of the following values:
+ // -1 = error: an out-of-order frame was detected
+ // 0 = do not create a new cluster, and write frame to the existing cluster
+ // 1 = create a new cluster, and write frame to that new cluster
+ // 2 = create a new cluster, and re-run test
+ int TestFrame(uint64_t track_num, uint64_t timestamp_ns, bool key) const;
+
+ // Create a new cluster, using the earlier of the first enqueued
+ // frame, or the indicated time. Returns true on success.
+ bool MakeNewCluster(uint64_t timestamp_ns);
+
+ // Checks whether a new cluster needs to be created, and if so
+ // creates a new cluster. Returns false if creation of a new cluster
+ // was necessary but creation was not successful.
+ bool DoNewClusterProcessing(uint64_t track_num, uint64_t timestamp_ns,
+ bool key);
+
+ // Adjusts Cue Point values (to place Cues before Clusters) so that they
+ // reflect the correct offsets.
+ void MoveCuesBeforeClusters();
+
+ // This function recursively computes the correct cluster offsets (this is
+ // done to move the Cues before Clusters). It recursively updates the change
+ // in size (which indicates a change in cluster offset) until no sizes change.
+ // Parameters:
+ // diff - indicates the difference in size of the Cues element that needs to
+  //        be accounted for.
+ // index - index in the list of Cues which is currently being adjusted.
+ // cue_size - sum of size of all the CuePoint elements.
+ void MoveCuesBeforeClustersHelper(uint64_t diff, int index,
+ uint64_t* cue_size);
+
+ // Seeds the random number generator used to make UIDs.
+ unsigned int seed_;
+
+ // WebM elements
+ Cues cues_;
+ SeekHead seek_head_;
+ SegmentInfo segment_info_;
+ Tracks tracks_;
+ Chapters chapters_;
+ Tags tags_;
+
+ // Number of chunks written.
+ int chunk_count_;
+
+ // Current chunk filename.
+ char* chunk_name_;
+
+ // Default MkvWriter object created by this class used for writing clusters
+ // out in separate files.
+ MkvWriter* chunk_writer_cluster_;
+
+ // Default MkvWriter object created by this class used for writing Cues
+ // element out to a file.
+ MkvWriter* chunk_writer_cues_;
+
+ // Default MkvWriter object created by this class used for writing the
+ // Matroska header out to a file.
+ MkvWriter* chunk_writer_header_;
+
+ // Flag telling whether or not the muxer is chunking output to multiple
+ // files.
+ bool chunking_;
+
+ // Base filename for the chunked files.
+ char* chunking_base_name_;
+
+ // File position offset where the Clusters end.
+ int64_t cluster_end_offset_;
+
+ // List of clusters.
+ Cluster** cluster_list_;
+
+ // Number of cluster pointers allocated in the cluster list.
+ int32_t cluster_list_capacity_;
+
+ // Number of clusters in the cluster list.
+ int32_t cluster_list_size_;
+
+ // Indicates whether Cues should be written before or after Clusters
+ CuesPosition cues_position_;
+
+ // Track number that is associated with the cues element for this segment.
+ uint64_t cues_track_;
+
+ // Tells the muxer to force a new cluster on the next Block.
+ bool force_new_cluster_;
+
+ // List of stored audio frames. These variables are used to store frames so
+ // the muxer can follow the guideline "Audio blocks that contain the video
+ // key frame's timecode should be in the same cluster as the video key frame
+ // block."
+ Frame** frames_;
+
+ // Number of frame pointers allocated in the frame list.
+ int32_t frames_capacity_;
+
+ // Number of frames in the frame list.
+ int32_t frames_size_;
+
+ // Flag telling if a video track has been added to the segment.
+ bool has_video_;
+
+ // Flag telling if the segment's header has been written.
+ bool header_written_;
+
+ // Duration of the last block in nanoseconds.
+ uint64_t last_block_duration_;
+
+ // Last timestamp in nanoseconds added to a cluster.
+ uint64_t last_timestamp_;
+
+ // Last timestamp in nanoseconds by track number added to a cluster.
+ uint64_t last_track_timestamp_[kMaxTrackNumber];
+
+ // Number of frames written per track.
+ uint64_t track_frames_written_[kMaxTrackNumber];
+
+ // Maximum time in nanoseconds for a cluster duration. This variable is a
+ // guideline and some clusters may have a longer duration. Default is 30
+ // seconds.
+ uint64_t max_cluster_duration_;
+
+ // Maximum size in bytes for a cluster. This variable is a guideline and
+ // some clusters may have a larger size. Default is 0 which signifies that
+ // the muxer will decide the size.
+ uint64_t max_cluster_size_;
+
+ // The mode that segment is in. If set to |kLive| the writer must not
+ // seek backwards.
+ Mode mode_;
+
+ // Flag telling the muxer that a new cue point should be added.
+ bool new_cuepoint_;
+
+ // TODO(fgalligan): Should we add support for more than one Cues element?
+ // Flag whether or not the muxer should output a Cues element.
+ bool output_cues_;
+
+ // Flag whether or not the last frame in each Cluster will have a Duration
+ // element in it.
+ bool accurate_cluster_duration_;
+
+ // Flag whether or not to write the Cluster Timecode using exactly 8 bytes.
+ bool fixed_size_cluster_timecode_;
+
+ // Flag whether or not to estimate the file duration.
+ bool estimate_file_duration_;
+
+ // The size of the EBML header, used to validate the header if
+ // WriteEbmlHeader() is called more than once.
+ int32_t ebml_header_size_;
+
+ // The file position of the segment's payload.
+ int64_t payload_pos_;
+
+ // The file position of the element's size.
+ int64_t size_position_;
+
+ // Current DocTypeVersion (|doc_type_version_|) and that written in
+ // WriteSegmentHeader().
+ // WriteEbmlHeader() will be called from Finalize() if |doc_type_version_|
+ // differs from |doc_type_version_written_|.
+ uint32_t doc_type_version_;
+ uint32_t doc_type_version_written_;
+
+ // If |duration_| is > 0, then explicitly set the duration of the segment.
+ double duration_;
+
+ // Pointer to the writer objects. Not owned by this class.
+ IMkvWriter* writer_cluster_;
+ IMkvWriter* writer_cues_;
+ IMkvWriter* writer_header_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(Segment);
+};
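+
+// End-to-end usage sketch (illustrative; error handling omitted). Assumes
+// |data|, |length| and |timestamp_ns| (hypothetical names) describe one
+// encoded video keyframe; MkvWriter is declared in mkvwriter.h:
+//   mkvmuxer::MkvWriter writer;
+//   writer.Open("out.webm");
+//   mkvmuxer::Segment segment;
+//   segment.Init(&writer);
+//   const uint64_t video = segment.AddVideoTrack(640, 360, 0);
+//   segment.AddFrame(data, length, video, timestamp_ns, true /* is_key */);
+//   segment.Finalize();
+//   writer.Close();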
+
+} // namespace mkvmuxer
+
+#endif // MKVMUXER_MKVMUXER_H_
diff --git a/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxertypes.h b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxertypes.h
new file mode 100644
index 000000000..e5db12160
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxertypes.h
@@ -0,0 +1,28 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#ifndef MKVMUXER_MKVMUXERTYPES_H_
+#define MKVMUXER_MKVMUXERTYPES_H_
+
+namespace mkvmuxer {
+typedef unsigned char uint8;
+typedef short int16;
+typedef int int32;
+typedef unsigned int uint32;
+typedef long long int64;
+typedef unsigned long long uint64;
+} // namespace mkvmuxer
+
+// Copied from Chromium basictypes.h
+// A macro to disallow the copy constructor and operator= functions
+// This should be used in the private: declarations for a class
+#define LIBWEBM_DISALLOW_COPY_AND_ASSIGN(TypeName) \
+ TypeName(const TypeName&); \
+ void operator=(const TypeName&)
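+
+// Usage sketch (illustrative): the macro goes in a class's private section,
+// as the classes in mkvmuxer.h do; MyType is a hypothetical class name:
+//   class MyType {
+//    public:
+//     MyType();
+//    private:
+//     LIBWEBM_DISALLOW_COPY_AND_ASSIGN(MyType);
+//   };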
+
+#endif  // MKVMUXER_MKVMUXERTYPES_H_
diff --git a/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc
new file mode 100644
index 000000000..355d4e22b
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxerutil.cc
@@ -0,0 +1,744 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#include "mkvmuxer/mkvmuxerutil.h"
+
+#ifdef __ANDROID__
+#include <fcntl.h>
+#include <unistd.h>
+#endif
+
+#include <cassert>
+#include <cmath>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <ctime>
+#include <new>
+
+#include "common/webmids.h"
+#include "mkvmuxer/mkvmuxer.h"
+#include "mkvmuxer/mkvwriter.h"
+
+namespace mkvmuxer {
+
+namespace {
+
+// Date elements are always 8 octets in size.
+const int kDateElementSize = 8;
+
+uint64 WriteBlock(IMkvWriter* writer, const Frame* const frame, int64 timecode,
+ uint64 timecode_scale) {
+ uint64 block_additional_elem_size = 0;
+ uint64 block_addid_elem_size = 0;
+ uint64 block_more_payload_size = 0;
+ uint64 block_more_elem_size = 0;
+ uint64 block_additions_payload_size = 0;
+ uint64 block_additions_elem_size = 0;
+ if (frame->additional()) {
+ block_additional_elem_size =
+ EbmlElementSize(libwebm::kMkvBlockAdditional, frame->additional(),
+ frame->additional_length());
+ block_addid_elem_size = EbmlElementSize(
+ libwebm::kMkvBlockAddID, static_cast<uint64>(frame->add_id()));
+
+ block_more_payload_size =
+ block_addid_elem_size + block_additional_elem_size;
+ block_more_elem_size =
+ EbmlMasterElementSize(libwebm::kMkvBlockMore, block_more_payload_size) +
+ block_more_payload_size;
+ block_additions_payload_size = block_more_elem_size;
+ block_additions_elem_size =
+ EbmlMasterElementSize(libwebm::kMkvBlockAdditions,
+ block_additions_payload_size) +
+ block_additions_payload_size;
+ }
+
+ uint64 discard_padding_elem_size = 0;
+ if (frame->discard_padding() != 0) {
+ discard_padding_elem_size =
+ EbmlElementSize(libwebm::kMkvDiscardPadding,
+ static_cast<int64>(frame->discard_padding()));
+ }
+
+ const uint64 reference_block_timestamp =
+ frame->reference_block_timestamp() / timecode_scale;
+ uint64 reference_block_elem_size = 0;
+ if (!frame->is_key()) {
+ reference_block_elem_size =
+ EbmlElementSize(libwebm::kMkvReferenceBlock, reference_block_timestamp);
+ }
+
+ const uint64 duration = frame->duration() / timecode_scale;
+ uint64 block_duration_elem_size = 0;
+ if (duration > 0)
+ block_duration_elem_size =
+ EbmlElementSize(libwebm::kMkvBlockDuration, duration);
+
+ const uint64 block_payload_size = 4 + frame->length();
+ const uint64 block_elem_size =
+ EbmlMasterElementSize(libwebm::kMkvBlock, block_payload_size) +
+ block_payload_size;
+
+ const uint64 block_group_payload_size =
+ block_elem_size + block_additions_elem_size + block_duration_elem_size +
+ discard_padding_elem_size + reference_block_elem_size;
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockGroup,
+ block_group_payload_size)) {
+ return 0;
+ }
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlock, block_payload_size))
+ return 0;
+
+ if (WriteUInt(writer, frame->track_number()))
+ return 0;
+
+ if (SerializeInt(writer, timecode, 2))
+ return 0;
+
+  // For a Block, the flags byte is always 0.
+ if (SerializeInt(writer, 0, 1))
+ return 0;
+
+ if (writer->Write(frame->frame(), static_cast<uint32>(frame->length())))
+ return 0;
+
+ if (frame->additional()) {
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockAdditions,
+ block_additions_payload_size)) {
+ return 0;
+ }
+
+ if (!WriteEbmlMasterElement(writer, libwebm::kMkvBlockMore,
+ block_more_payload_size))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvBlockAddID,
+ static_cast<uint64>(frame->add_id())))
+ return 0;
+
+ if (!WriteEbmlElement(writer, libwebm::kMkvBlockAdditional,
+ frame->additional(), frame->additional_length())) {
+ return 0;
+ }
+ }
+
+ if (frame->discard_padding() != 0 &&
+ !WriteEbmlElement(writer, libwebm::kMkvDiscardPadding,
+ static_cast<int64>(frame->discard_padding()))) {
+    return 0;
+ }
+
+ if (!frame->is_key() &&
+ !WriteEbmlElement(writer, libwebm::kMkvReferenceBlock,
+ reference_block_timestamp)) {
+    return 0;
+ }
+
+ if (duration > 0 &&
+ !WriteEbmlElement(writer, libwebm::kMkvBlockDuration, duration)) {
+    return 0;
+ }
+ return EbmlMasterElementSize(libwebm::kMkvBlockGroup,
+ block_group_payload_size) +
+ block_group_payload_size;
+}
+
+uint64 WriteSimpleBlock(IMkvWriter* writer, const Frame* const frame,
+ int64 timecode) {
+ if (WriteID(writer, libwebm::kMkvSimpleBlock))
+ return 0;
+
+ const int32 size = static_cast<int32>(frame->length()) + 4;
+ if (WriteUInt(writer, size))
+ return 0;
+
+ if (WriteUInt(writer, static_cast<uint64>(frame->track_number())))
+ return 0;
+
+ if (SerializeInt(writer, timecode, 2))
+ return 0;
+
+ uint64 flags = 0;
+ if (frame->is_key())
+ flags |= 0x80;
+
+ if (SerializeInt(writer, flags, 1))
+ return 0;
+
+ if (writer->Write(frame->frame(), static_cast<uint32>(frame->length())))
+ return 0;
+
+ return GetUIntSize(libwebm::kMkvSimpleBlock) + GetCodedUIntSize(size) + 4 +
+ frame->length();
+}
+
+} // namespace
+
+int32 GetCodedUIntSize(uint64 value) {
+ if (value < 0x000000000000007FULL)
+ return 1;
+ else if (value < 0x0000000000003FFFULL)
+ return 2;
+ else if (value < 0x00000000001FFFFFULL)
+ return 3;
+ else if (value < 0x000000000FFFFFFFULL)
+ return 4;
+ else if (value < 0x00000007FFFFFFFFULL)
+ return 5;
+ else if (value < 0x000003FFFFFFFFFFULL)
+ return 6;
+ else if (value < 0x0001FFFFFFFFFFFFULL)
+ return 7;
+ return 8;
+}
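
An EBML-coded integer of N bytes carries 7*N data bits, and the all-ones pattern at each width is reserved, so the comparisons above use 0x7F, 0x3FFF, and so on as exclusive upper bounds. A quick sanity check (a sketch, assuming mkvmuxer/mkvmuxerutil.h is on the include path; not part of the library):

    #include <cassert>
    #include "mkvmuxer/mkvmuxerutil.h"

    int main() {
      assert(mkvmuxer::GetCodedUIntSize(0x7E) == 1);    // largest 1-byte value
      assert(mkvmuxer::GetCodedUIntSize(0x7F) == 2);    // needs the 2-byte form
      assert(mkvmuxer::GetCodedUIntSize(0x3FFE) == 2);
      assert(mkvmuxer::GetCodedUIntSize(0x3FFF) == 3);
      return 0;
    }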
+
+int32 GetUIntSize(uint64 value) {
+ if (value < 0x0000000000000100ULL)
+ return 1;
+ else if (value < 0x0000000000010000ULL)
+ return 2;
+ else if (value < 0x0000000001000000ULL)
+ return 3;
+ else if (value < 0x0000000100000000ULL)
+ return 4;
+ else if (value < 0x0000010000000000ULL)
+ return 5;
+ else if (value < 0x0001000000000000ULL)
+ return 6;
+ else if (value < 0x0100000000000000ULL)
+ return 7;
+ return 8;
+}
+
+int32 GetIntSize(int64 value) {
+  // Doubling the requested value ensures positive values with their high bit
+  // set are written with 0-padding to avoid flipping the signedness. For
+  // example, 128 doubles to 256 and is given two bytes (0x00 0x80), where a
+  // one-byte 0x80 would read back as -128; -1 maps to (-1 ^ -1) = 0 and fits
+  // in a single byte (0xFF).
+  const uint64 v = (value < 0) ? value ^ -1LL : value;
+ return GetUIntSize(2 * v);
+}
+
+uint64 EbmlMasterElementSize(uint64 type, uint64 value) {
+ // Size of EBML ID
+ int32 ebml_size = GetUIntSize(type);
+
+ // Datasize
+ ebml_size += GetCodedUIntSize(value);
+
+ return ebml_size;
+}
+
+uint64 EbmlElementSize(uint64 type, int64 value) {
+ // Size of EBML ID
+ int32 ebml_size = GetUIntSize(type);
+
+ // Datasize
+ ebml_size += GetIntSize(value);
+
+ // Size of Datasize
+ ebml_size++;
+
+ return ebml_size;
+}
+
+uint64 EbmlElementSize(uint64 type, uint64 value) {
+ return EbmlElementSize(type, value, 0);
+}
+
+uint64 EbmlElementSize(uint64 type, uint64 value, uint64 fixed_size) {
+ // Size of EBML ID
+ uint64 ebml_size = GetUIntSize(type);
+
+ // Datasize
+ ebml_size += (fixed_size > 0) ? fixed_size : GetUIntSize(value);
+
+ // Size of Datasize
+ ebml_size++;
+
+ return ebml_size;
+}
+
+uint64 EbmlElementSize(uint64 type, float /* value */) {
+ // Size of EBML ID
+ uint64 ebml_size = GetUIntSize(type);
+
+ // Datasize
+ ebml_size += sizeof(float);
+
+ // Size of Datasize
+ ebml_size++;
+
+ return ebml_size;
+}
+
+uint64 EbmlElementSize(uint64 type, const char* value) {
+ if (!value)
+ return 0;
+
+ // Size of EBML ID
+ uint64 ebml_size = GetUIntSize(type);
+
+ // Datasize
+ ebml_size += strlen(value);
+
+ // Size of Datasize
+ ebml_size += GetCodedUIntSize(strlen(value));
+
+ return ebml_size;
+}
+
+uint64 EbmlElementSize(uint64 type, const uint8* value, uint64 size) {
+ if (!value)
+ return 0;
+
+ // Size of EBML ID
+ uint64 ebml_size = GetUIntSize(type);
+
+ // Datasize
+ ebml_size += size;
+
+ // Size of Datasize
+ ebml_size += GetCodedUIntSize(size);
+
+ return ebml_size;
+}
+
+uint64 EbmlDateElementSize(uint64 type) {
+ // Size of EBML ID
+ uint64 ebml_size = GetUIntSize(type);
+
+ // Datasize
+ ebml_size += kDateElementSize;
+
+ // Size of Datasize
+ ebml_size++;
+
+ return ebml_size;
+}
+
+int32 SerializeInt(IMkvWriter* writer, int64 value, int32 size) {
+ if (!writer || size < 1 || size > 8)
+ return -1;
+
+ for (int32 i = 1; i <= size; ++i) {
+ const int32 byte_count = size - i;
+ const int32 bit_count = byte_count * 8;
+
+ const int64 bb = value >> bit_count;
+ const uint8 b = static_cast<uint8>(bb);
+
+ const int32 status = writer->Write(&b, 1);
+
+ if (status < 0)
+ return status;
+ }
+
+ return 0;
+}
+
+int32 SerializeFloat(IMkvWriter* writer, float f) {
+ if (!writer)
+ return -1;
+
+ assert(sizeof(uint32) == sizeof(float));
+  // This union is merely used to avoid a reinterpret_cast from float& to
+  // uint32&, which would violate strict aliasing.
+ union U32 {
+ uint32 u32;
+ float f;
+ } value;
+ value.f = f;
+
+ for (int32 i = 1; i <= 4; ++i) {
+ const int32 byte_count = 4 - i;
+ const int32 bit_count = byte_count * 8;
+
+ const uint8 byte = static_cast<uint8>(value.u32 >> bit_count);
+
+ const int32 status = writer->Write(&byte, 1);
+
+ if (status < 0)
+ return status;
+ }
+
+ return 0;
+}
+
+int32 WriteUInt(IMkvWriter* writer, uint64 value) {
+ if (!writer)
+ return -1;
+
+ int32 size = GetCodedUIntSize(value);
+
+ return WriteUIntSize(writer, value, size);
+}
+
+int32 WriteUIntSize(IMkvWriter* writer, uint64 value, int32 size) {
+ if (!writer || size < 0 || size > 8)
+ return -1;
+
+ if (size > 0) {
+ const uint64 bit = 1LL << (size * 7);
+
+ if (value > (bit - 2))
+ return -1;
+
+ value |= bit;
+ } else {
+ size = 1;
+ int64 bit;
+
+ for (;;) {
+ bit = 1LL << (size * 7);
+ const uint64 max = bit - 2;
+
+ if (value <= max)
+ break;
+
+ ++size;
+ }
+
+ if (size > 8)
+      return -1;
+
+ value |= bit;
+ }
+
+ return SerializeInt(writer, value, size);
+}
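
As a worked example of the coding above: for value = 500 with size passed as 0, the loop settles on size = 2 (the 1-byte maximum is 0x7E = 126); the marker bit is 1 << 14 = 0x4000, so the serialized value is 0x4000 | 500 = 0x41F4, written big-endian as the two bytes 0x41 0xF4.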
+
+int32 WriteID(IMkvWriter* writer, uint64 type) {
+ if (!writer)
+ return -1;
+
+ writer->ElementStartNotify(type, writer->Position());
+
+ const int32 size = GetUIntSize(type);
+
+ return SerializeInt(writer, type, size);
+}
+
+bool WriteEbmlMasterElement(IMkvWriter* writer, uint64 type, uint64 size) {
+ if (!writer)
+ return false;
+
+ if (WriteID(writer, type))
+ return false;
+
+ if (WriteUInt(writer, size))
+ return false;
+
+ return true;
+}
+
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, uint64 value) {
+ return WriteEbmlElement(writer, type, value, 0);
+}
+
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, uint64 value,
+ uint64 fixed_size) {
+ if (!writer)
+ return false;
+
+ if (WriteID(writer, type))
+ return false;
+
+ uint64 size = GetUIntSize(value);
+ if (fixed_size > 0) {
+ if (size > fixed_size)
+ return false;
+ size = fixed_size;
+ }
+ if (WriteUInt(writer, size))
+ return false;
+
+ if (SerializeInt(writer, value, static_cast<int32>(size)))
+ return false;
+
+ return true;
+}
+
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, int64 value) {
+ if (!writer)
+ return false;
+
+ if (WriteID(writer, type))
+    return false;
+
+ const uint64 size = GetIntSize(value);
+ if (WriteUInt(writer, size))
+ return false;
+
+ if (SerializeInt(writer, value, static_cast<int32>(size)))
+ return false;
+
+ return true;
+}
+
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, float value) {
+ if (!writer)
+ return false;
+
+ if (WriteID(writer, type))
+ return false;
+
+ if (WriteUInt(writer, 4))
+ return false;
+
+ if (SerializeFloat(writer, value))
+ return false;
+
+ return true;
+}
+
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const char* value) {
+ if (!writer || !value)
+ return false;
+
+ if (WriteID(writer, type))
+ return false;
+
+ const uint64 length = strlen(value);
+ if (WriteUInt(writer, length))
+ return false;
+
+  if (writer->Write(value, static_cast<uint32>(length)))
+ return false;
+
+ return true;
+}
+
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const uint8* value,
+ uint64 size) {
+ if (!writer || !value || size < 1)
+ return false;
+
+ if (WriteID(writer, type))
+ return false;
+
+ if (WriteUInt(writer, size))
+ return false;
+
+ if (writer->Write(value, static_cast<uint32>(size)))
+ return false;
+
+ return true;
+}
+
+bool WriteEbmlDateElement(IMkvWriter* writer, uint64 type, int64 value) {
+ if (!writer)
+ return false;
+
+ if (WriteID(writer, type))
+ return false;
+
+ if (WriteUInt(writer, kDateElementSize))
+ return false;
+
+ if (SerializeInt(writer, value, kDateElementSize))
+ return false;
+
+ return true;
+}
+
+uint64 WriteFrame(IMkvWriter* writer, const Frame* const frame,
+ Cluster* cluster) {
+ if (!writer || !frame || !frame->IsValid() || !cluster ||
+ !cluster->timecode_scale())
+ return 0;
+
+ // Technically the timecode for a block can be less than the
+ // timecode for the cluster itself (remember that block timecode
+ // is a signed, 16-bit integer). However, as a simplification we
+ // only permit non-negative cluster-relative timecodes for blocks.
+ const int64 relative_timecode = cluster->GetRelativeTimecode(
+ frame->timestamp() / cluster->timecode_scale());
+ if (relative_timecode < 0 || relative_timecode > kMaxBlockTimecode)
+ return 0;
+
+ return frame->CanBeSimpleBlock() ?
+ WriteSimpleBlock(writer, frame, relative_timecode) :
+ WriteBlock(writer, frame, relative_timecode,
+ cluster->timecode_scale());
+}
+
+uint64 WriteVoidElement(IMkvWriter* writer, uint64 size) {
+ if (!writer)
+    return 0;
+
+ // Subtract one for the void ID and the coded size.
+ uint64 void_entry_size = size - 1 - GetCodedUIntSize(size - 1);
+ uint64 void_size = EbmlMasterElementSize(libwebm::kMkvVoid, void_entry_size) +
+ void_entry_size;
+
+ if (void_size != size)
+ return 0;
+
+ const int64 payload_position = writer->Position();
+ if (payload_position < 0)
+ return 0;
+
+ if (WriteID(writer, libwebm::kMkvVoid))
+ return 0;
+
+ if (WriteUInt(writer, void_entry_size))
+ return 0;
+
+ const uint8 value = 0;
+ for (int32 i = 0; i < static_cast<int32>(void_entry_size); ++i) {
+ if (writer->Write(&value, 1))
+ return 0;
+ }
+
+ const int64 stop_position = writer->Position();
+ if (stop_position < 0 ||
+ stop_position - payload_position != static_cast<int64>(void_size))
+ return 0;
+
+ return void_size;
+}
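
As a worked example: WriteVoidElement(writer, 10) computes void_entry_size = 10 - 1 - 1 = 8, then writes the 1-byte Void ID (0xEC), a 1-byte coded size (0x88), and 8 zero bytes, reserving exactly 10 bytes that can later be overwritten in place.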
+
+void GetVersion(int32* major, int32* minor, int32* build, int32* revision) {
+ *major = 0;
+ *minor = 2;
+ *build = 1;
+ *revision = 0;
+}
+
+uint64 MakeUID(unsigned int* seed) {
+ uint64 uid = 0;
+
+#ifdef __MINGW32__
+ srand(*seed);
+#endif
+
+ for (int i = 0; i < 7; ++i) { // avoid problems with 8-byte values
+ uid <<= 8;
+
+// TODO(fgalligan): Move random number generation to platform specific code.
+#ifdef _MSC_VER
+ (void)seed;
+ const int32 nn = rand();
+#elif __ANDROID__
+ (void)seed;
+ int32 temp_num = 1;
+ int fd = open("/dev/urandom", O_RDONLY);
+ if (fd != -1) {
+ read(fd, &temp_num, sizeof(temp_num));
+ close(fd);
+ }
+ const int32 nn = temp_num;
+#elif defined __MINGW32__
+ const int32 nn = rand();
+#else
+ const int32 nn = rand_r(seed);
+#endif
+ const int32 n = 0xFF & (nn >> 4); // throw away low-order bits
+
+ uid |= n;
+ }
+
+ return uid;
+}
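
A usage sketch (the seeding policy is the caller's choice; time-based seeding here is illustrative only):

    #include <ctime>

    unsigned int seed = static_cast<unsigned int>(time(NULL));
    const mkvmuxer::uint64 track_uid = mkvmuxer::MakeUID(&seed);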
+
+bool IsMatrixCoefficientsValueValid(uint64_t value) {
+ switch (value) {
+ case mkvmuxer::Colour::kGbr:
+ case mkvmuxer::Colour::kBt709:
+ case mkvmuxer::Colour::kUnspecifiedMc:
+ case mkvmuxer::Colour::kReserved:
+ case mkvmuxer::Colour::kFcc:
+ case mkvmuxer::Colour::kBt470bg:
+ case mkvmuxer::Colour::kSmpte170MMc:
+ case mkvmuxer::Colour::kSmpte240MMc:
+ case mkvmuxer::Colour::kYcocg:
+ case mkvmuxer::Colour::kBt2020NonConstantLuminance:
+ case mkvmuxer::Colour::kBt2020ConstantLuminance:
+ return true;
+ }
+ return false;
+}
+
+bool IsChromaSitingHorzValueValid(uint64_t value) {
+ switch (value) {
+ case mkvmuxer::Colour::kUnspecifiedCsh:
+ case mkvmuxer::Colour::kLeftCollocated:
+ case mkvmuxer::Colour::kHalfCsh:
+ return true;
+ }
+ return false;
+}
+
+bool IsChromaSitingVertValueValid(uint64_t value) {
+ switch (value) {
+ case mkvmuxer::Colour::kUnspecifiedCsv:
+ case mkvmuxer::Colour::kTopCollocated:
+ case mkvmuxer::Colour::kHalfCsv:
+ return true;
+ }
+ return false;
+}
+
+bool IsColourRangeValueValid(uint64_t value) {
+ switch (value) {
+ case mkvmuxer::Colour::kUnspecifiedCr:
+ case mkvmuxer::Colour::kBroadcastRange:
+ case mkvmuxer::Colour::kFullRange:
+ case mkvmuxer::Colour::kMcTcDefined:
+ return true;
+ }
+ return false;
+}
+
+bool IsTransferCharacteristicsValueValid(uint64_t value) {
+ switch (value) {
+ case mkvmuxer::Colour::kIturBt709Tc:
+ case mkvmuxer::Colour::kUnspecifiedTc:
+ case mkvmuxer::Colour::kReservedTc:
+ case mkvmuxer::Colour::kGamma22Curve:
+ case mkvmuxer::Colour::kGamma28Curve:
+ case mkvmuxer::Colour::kSmpte170MTc:
+ case mkvmuxer::Colour::kSmpte240MTc:
+ case mkvmuxer::Colour::kLinear:
+ case mkvmuxer::Colour::kLog:
+ case mkvmuxer::Colour::kLogSqrt:
+ case mkvmuxer::Colour::kIec6196624:
+ case mkvmuxer::Colour::kIturBt1361ExtendedColourGamut:
+ case mkvmuxer::Colour::kIec6196621:
+ case mkvmuxer::Colour::kIturBt202010bit:
+ case mkvmuxer::Colour::kIturBt202012bit:
+ case mkvmuxer::Colour::kSmpteSt2084:
+ case mkvmuxer::Colour::kSmpteSt4281Tc:
+ case mkvmuxer::Colour::kAribStdB67Hlg:
+ return true;
+ }
+ return false;
+}
+
+bool IsPrimariesValueValid(uint64_t value) {
+ switch (value) {
+ case mkvmuxer::Colour::kReservedP0:
+ case mkvmuxer::Colour::kIturBt709P:
+ case mkvmuxer::Colour::kUnspecifiedP:
+ case mkvmuxer::Colour::kReservedP3:
+ case mkvmuxer::Colour::kIturBt470M:
+ case mkvmuxer::Colour::kIturBt470Bg:
+ case mkvmuxer::Colour::kSmpte170MP:
+ case mkvmuxer::Colour::kSmpte240MP:
+ case mkvmuxer::Colour::kFilm:
+ case mkvmuxer::Colour::kIturBt2020:
+ case mkvmuxer::Colour::kSmpteSt4281P:
+ case mkvmuxer::Colour::kJedecP22Phosphors:
+ return true;
+ }
+ return false;
+}
+
+} // namespace mkvmuxer
diff --git a/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxerutil.h b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxerutil.h
new file mode 100644
index 000000000..132388da5
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvmuxerutil.h
@@ -0,0 +1,112 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#ifndef MKVMUXER_MKVMUXERUTIL_H_
+#define MKVMUXER_MKVMUXERUTIL_H_
+
+#include "mkvmuxertypes.h"
+
+#include "stdint.h"
+
+namespace mkvmuxer {
+class Cluster;
+class Frame;
+class IMkvWriter;
+
+// TODO(tomfinegan): mkvmuxer:: integer types continue to be used here because
+// changing them causes pain for downstream projects. It would be nice to find
+// a solution that allows removal of the mkvmuxer:: integer types while
+// avoiding pain for downstream users of libwebm. Considering that
+// mkvmuxerutil.{cc,h} are really, for the great majority of cases, EBML size
+// calculation and writer functions, perhaps a more EBML-focused utility would
+// be the way to go as a first step.
+
+const uint64 kEbmlUnknownValue = 0x01FFFFFFFFFFFFFFULL;
+const int64 kMaxBlockTimecode = 0x07FFFLL;
+
+// Writes out |value| in Big Endian order. Returns 0 on success.
+int32 SerializeInt(IMkvWriter* writer, int64 value, int32 size);
+
+// Returns the size in bytes of the element.
+int32 GetUIntSize(uint64 value);
+int32 GetIntSize(int64 value);
+int32 GetCodedUIntSize(uint64 value);
+uint64 EbmlMasterElementSize(uint64 type, uint64 value);
+uint64 EbmlElementSize(uint64 type, int64 value);
+uint64 EbmlElementSize(uint64 type, uint64 value);
+uint64 EbmlElementSize(uint64 type, float value);
+uint64 EbmlElementSize(uint64 type, const char* value);
+uint64 EbmlElementSize(uint64 type, const uint8* value, uint64 size);
+uint64 EbmlDateElementSize(uint64 type);
+
+// Returns the size in bytes of the element assuming that the element was
+// written using |fixed_size| bytes. If |fixed_size| is set to zero, then it
+// computes the necessary number of bytes based on |value|.
+uint64 EbmlElementSize(uint64 type, uint64 value, uint64 fixed_size);
+
+// Creates an EBML coded number from |value| and writes it out. The size of
+// the coded number is determined by the value of |value|. |value| must not
+// be in a coded form. Returns 0 on success.
+int32 WriteUInt(IMkvWriter* writer, uint64 value);
+
+// Creates an EBML coded number from |value| and writes it out. The size of
+// the coded number is determined by the value of |size|. |value| must not
+// be in a coded form. Returns 0 on success.
+int32 WriteUIntSize(IMkvWriter* writer, uint64 value, int32 size);
+
+// Output an Mkv master element. Returns true if the element was written.
+bool WriteEbmlMasterElement(IMkvWriter* writer, uint64 type, uint64 size);
+
+// Outputs an Mkv ID, calls |IMkvWriter::ElementStartNotify|, and passes the
+// ID to |SerializeInt|. Returns 0 on success.
+int32 WriteID(IMkvWriter* writer, uint64 type);
+
+// Output an Mkv non-master element. Returns true if the element was written.
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, uint64 value);
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, int64 value);
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, float value);
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const char* value);
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, const uint8* value,
+ uint64 size);
+bool WriteEbmlDateElement(IMkvWriter* writer, uint64 type, int64 value);
+
+// Output an Mkv non-master element using a fixed size. The element will be
+// written out using exactly |fixed_size| bytes. If |fixed_size| is set to zero
+// then it computes the necessary number of bytes based on |value|. Returns true
+// if the element was written.
+bool WriteEbmlElement(IMkvWriter* writer, uint64 type, uint64 value,
+ uint64 fixed_size);
+
+// Output an Mkv Frame. It decides the correct element to write (Block vs
+// SimpleBlock) based on the parameters of the Frame.
+uint64 WriteFrame(IMkvWriter* writer, const Frame* const frame,
+ Cluster* cluster);
+
+// Output a void element. |size| must be the entire size in bytes that will be
+// void. The function will calculate the size of the void header and subtract
+// it from |size|.
+uint64 WriteVoidElement(IMkvWriter* writer, uint64 size);
+
+// Returns the version number of the muxer in |major|, |minor|, |build|,
+// and |revision|.
+void GetVersion(int32* major, int32* minor, int32* build, int32* revision);
+
+// Returns a random number to be used as a UID, using |seed| to seed
+// the random-number generator (see POSIX rand_r() for semantics).
+uint64 MakeUID(unsigned int* seed);
+
+// Colour field validation helpers. All return true when |value| is valid.
+bool IsMatrixCoefficientsValueValid(uint64_t value);
+bool IsChromaSitingHorzValueValid(uint64_t value);
+bool IsChromaSitingVertValueValid(uint64_t value);
+bool IsColourRangeValueValid(uint64_t value);
+bool IsTransferCharacteristicsValueValid(uint64_t value);
+bool IsPrimariesValueValid(uint64_t value);
+
+} // namespace mkvmuxer
+
+#endif // MKVMUXER_MKVMUXERUTIL_H_
diff --git a/media/libaom/src/third_party/libwebm/mkvmuxer/mkvwriter.cc b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvwriter.cc
new file mode 100644
index 000000000..84655d802
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvwriter.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#include "mkvmuxer/mkvwriter.h"
+
+#include <sys/types.h>
+
+#ifdef _MSC_VER
+#include <share.h> // for _SH_DENYWR
+#endif
+
+namespace mkvmuxer {
+
+MkvWriter::MkvWriter() : file_(NULL), writer_owns_file_(true) {}
+
+MkvWriter::MkvWriter(FILE* fp) : file_(fp), writer_owns_file_(false) {}
+
+MkvWriter::~MkvWriter() { Close(); }
+
+int32 MkvWriter::Write(const void* buffer, uint32 length) {
+ if (!file_)
+ return -1;
+
+ if (length == 0)
+ return 0;
+
+ if (buffer == NULL)
+ return -1;
+
+ const size_t bytes_written = fwrite(buffer, 1, length, file_);
+
+ return (bytes_written == length) ? 0 : -1;
+}
+
+bool MkvWriter::Open(const char* filename) {
+ if (filename == NULL)
+ return false;
+
+ if (file_)
+ return false;
+
+#ifdef _MSC_VER
+ file_ = _fsopen(filename, "wb", _SH_DENYWR);
+#else
+ file_ = fopen(filename, "wb");
+#endif
+ if (file_ == NULL)
+ return false;
+ return true;
+}
+
+void MkvWriter::Close() {
+ if (file_ && writer_owns_file_) {
+ fclose(file_);
+ }
+ file_ = NULL;
+}
+
+int64 MkvWriter::Position() const {
+ if (!file_)
+ return 0;
+
+#ifdef _MSC_VER
+ return _ftelli64(file_);
+#else
+  return ftello(file_);
+#endif
+}
+
+int32 MkvWriter::Position(int64 position) {
+ if (!file_)
+ return -1;
+
+#ifdef _MSC_VER
+ return _fseeki64(file_, position, SEEK_SET);
+#else
+ return fseeko(file_, static_cast<off_t>(position), SEEK_SET);
+#endif
+}
+
+bool MkvWriter::Seekable() const { return true; }
+
+void MkvWriter::ElementStartNotify(uint64, int64) {}
+
+} // namespace mkvmuxer
diff --git a/media/libaom/src/third_party/libwebm/mkvmuxer/mkvwriter.h b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvwriter.h
new file mode 100644
index 000000000..4227c6374
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvmuxer/mkvwriter.h
@@ -0,0 +1,51 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+
+#ifndef MKVMUXER_MKVWRITER_H_
+#define MKVMUXER_MKVWRITER_H_
+
+#include <stdio.h>
+
+#include "mkvmuxer/mkvmuxer.h"
+#include "mkvmuxer/mkvmuxertypes.h"
+
+namespace mkvmuxer {
+
+// Default implementation of the IMkvWriter interface, backed by a stdio FILE
+// (with Windows-specific open/seek handling where needed).
+class MkvWriter : public IMkvWriter {
+ public:
+ MkvWriter();
+ explicit MkvWriter(FILE* fp);
+ virtual ~MkvWriter();
+
+ // IMkvWriter interface
+ virtual int64 Position() const;
+ virtual int32 Position(int64 position);
+ virtual bool Seekable() const;
+ virtual int32 Write(const void* buffer, uint32 length);
+ virtual void ElementStartNotify(uint64 element_id, int64 position);
+
+ // Creates and opens a file for writing. |filename| is the name of the file
+ // to open. This function will overwrite the contents of |filename|. Returns
+ // true on success.
+ bool Open(const char* filename);
+
+ // Closes an opened file.
+ void Close();
+
+ private:
+ // File handle to output file.
+ FILE* file_;
+ bool writer_owns_file_;
+
+ LIBWEBM_DISALLOW_COPY_AND_ASSIGN(MkvWriter);
+};
+
+} // namespace mkvmuxer
+
+#endif // MKVMUXER_MKVWRITER_H_
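
A minimal end-to-end muxing sketch using this writer (the output path is hypothetical; track and frame setup are elided, and error handling is reduced to early returns):

    #include "mkvmuxer/mkvmuxer.h"
    #include "mkvmuxer/mkvwriter.h"

    int MuxExample() {
      mkvmuxer::MkvWriter writer;
      if (!writer.Open("out.webm"))  // hypothetical output file
        return -1;

      mkvmuxer::Segment segment;
      if (!segment.Init(&writer))
        return -1;

      // ... add tracks and frames here (e.g. Segment::AddVideoTrack(),
      // Segment::AddFrame()) ...

      const bool ok = segment.Finalize();
      writer.Close();
      return ok ? 0 : -1;
    }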
diff --git a/media/libaom/src/third_party/libwebm/mkvparser/mkvparser.cc b/media/libaom/src/third_party/libwebm/mkvparser/mkvparser.cc
new file mode 100644
index 000000000..e7b76f7da
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvparser/mkvparser.cc
@@ -0,0 +1,8049 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#include "mkvparser/mkvparser.h"
+
+#if defined(_MSC_VER) && _MSC_VER < 1800
+#include <float.h> // _isnan() / _finite()
+#define MSC_COMPAT
+#endif
+
+#include <cassert>
+#include <cfloat>
+#include <climits>
+#include <cmath>
+#include <cstring>
+#include <memory>
+#include <new>
+
+#include "common/webmids.h"
+
+namespace mkvparser {
+const long long kStringElementSizeLimit = 20 * 1000 * 1000;
+const float MasteringMetadata::kValueNotPresent = FLT_MAX;
+const long long Colour::kValueNotPresent = LLONG_MAX;
+const float Projection::kValueNotPresent = FLT_MAX;
+
+#ifdef MSC_COMPAT
+inline bool isnan(double val) { return !!_isnan(val); }
+inline bool isinf(double val) { return !_finite(val); }
+#else
+inline bool isnan(double val) { return std::isnan(val); }
+inline bool isinf(double val) { return std::isinf(val); }
+#endif // MSC_COMPAT
+
+IMkvReader::~IMkvReader() {}
+
+template <typename Type>
+Type* SafeArrayAlloc(unsigned long long num_elements,
+ unsigned long long element_size) {
+ if (num_elements == 0 || element_size == 0)
+ return NULL;
+
+ const size_t kMaxAllocSize = 0x80000000; // 2GiB
+ const unsigned long long num_bytes = num_elements * element_size;
+ if (element_size > (kMaxAllocSize / num_elements))
+ return NULL;
+ if (num_bytes != static_cast<size_t>(num_bytes))
+ return NULL;
+
+ return new (std::nothrow) Type[static_cast<size_t>(num_bytes)];
+}
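
SafeArrayAlloc is internal to this translation unit; note that it allocates num_elements * element_size array elements, which coincides with a byte count for the byte-sized Types this file uses. A sketch of calls as they appear within it:

    // 1 KiB of chars: allocated normally.
    char* ok = SafeArrayAlloc<char>(1, 1024);
    // The 2 GiB cap check fails (element_size > kMaxAllocSize / num_elements):
    char* too_big = SafeArrayAlloc<char>(1ULL << 32, 2);  // returns NULL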
+
+void GetVersion(int& major, int& minor, int& build, int& revision) {
+ major = 1;
+ minor = 0;
+ build = 0;
+ revision = 30;
+}
+
+long long ReadUInt(IMkvReader* pReader, long long pos, long& len) {
+ if (!pReader || pos < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ len = 1;
+ unsigned char b;
+ int status = pReader->Read(pos, 1, &b);
+
+ if (status < 0) // error or underflow
+ return status;
+
+ if (status > 0) // interpreted as "underflow"
+ return E_BUFFER_NOT_FULL;
+
+ if (b == 0) // we can't handle u-int values larger than 8 bytes
+ return E_FILE_FORMAT_INVALID;
+
+ unsigned char m = 0x80;
+
+ while (!(b & m)) {
+ m >>= 1;
+ ++len;
+ }
+
+ long long result = b & (~m);
+ ++pos;
+
+ for (int i = 1; i < len; ++i) {
+ status = pReader->Read(pos, 1, &b);
+
+ if (status < 0) {
+ len = 1;
+ return status;
+ }
+
+ if (status > 0) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result <<= 8;
+ result |= b;
+
+ ++pos;
+ }
+
+ return result;
+}
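
As a worked example: the byte sequence 0x41 0xF4 decodes as follows. The first byte's marker bit is found at 0x40, so len = 2 and the initial data bits are 0x41 & ~0x40 = 0x01; shifting in the second byte yields (0x01 << 8) | 0xF4 = 500. Note that the marker bit is stripped here, unlike in ReadID below.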
+
+// Reads an EBML ID and returns it.
+// An ID must be at least 1 byte long, cannot exceed 4 bytes, and its value
+// must be greater than 0.
+// See known EBML values and EBMLMaxIDLength:
+// http://www.matroska.org/technical/specs/index.html
+// Returns the ID, or a value less than 0 to report an error while reading the
+// ID.
+long long ReadID(IMkvReader* pReader, long long pos, long& len) {
+ if (pReader == NULL || pos < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ // Read the first byte. The length in bytes of the ID is determined by
+ // finding the first set bit in the first byte of the ID.
+ unsigned char temp_byte = 0;
+ int read_status = pReader->Read(pos, 1, &temp_byte);
+
+ if (read_status < 0)
+ return E_FILE_FORMAT_INVALID;
+ else if (read_status > 0) // No data to read.
+ return E_BUFFER_NOT_FULL;
+
+ if (temp_byte == 0) // ID length > 8 bytes; invalid file.
+ return E_FILE_FORMAT_INVALID;
+
+ int bit_pos = 0;
+ const int kMaxIdLengthInBytes = 4;
+ const int kCheckByte = 0x80;
+
+ // Find the first bit that's set.
+ bool found_bit = false;
+ for (; bit_pos < kMaxIdLengthInBytes; ++bit_pos) {
+ if ((kCheckByte >> bit_pos) & temp_byte) {
+ found_bit = true;
+ break;
+ }
+ }
+
+ if (!found_bit) {
+ // The value is too large to be a valid ID.
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ // Read the remaining bytes of the ID (if any).
+ const int id_length = bit_pos + 1;
+ long long ebml_id = temp_byte;
+ for (int i = 1; i < id_length; ++i) {
+ ebml_id <<= 8;
+ read_status = pReader->Read(pos + i, 1, &temp_byte);
+
+ if (read_status < 0)
+ return E_FILE_FORMAT_INVALID;
+ else if (read_status > 0)
+ return E_BUFFER_NOT_FULL;
+
+ ebml_id |= temp_byte;
+ }
+
+ len = id_length;
+ return ebml_id;
+}
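
As a worked example: a first byte of 0x1A has its first set bit at 0x10 (bit_pos = 3), so the ID is 4 bytes long; reading three more bytes of a typical file yields 0x1A45DFA3, the EBML header ID. Unlike ReadUInt above, the length-marker bits are kept as part of the ID value.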
+
+long long GetUIntLength(IMkvReader* pReader, long long pos, long& len) {
+ if (!pReader || pos < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ long long total, available;
+
+ int status = pReader->Length(&total, &available);
+ if (status < 0 || (total >= 0 && available > total))
+ return E_FILE_FORMAT_INVALID;
+
+ len = 1;
+
+ if (pos >= available)
+ return pos; // too few bytes available
+
+ unsigned char b;
+
+ status = pReader->Read(pos, 1, &b);
+
+ if (status != 0)
+ return status;
+
+ if (b == 0) // we can't handle u-int values larger than 8 bytes
+ return E_FILE_FORMAT_INVALID;
+
+ unsigned char m = 0x80;
+
+ while (!(b & m)) {
+ m >>= 1;
+ ++len;
+ }
+
+ return 0; // success
+}
+
+// TODO(vigneshv): This function assumes that unsigned values never have their
+// high bit set.
+long long UnserializeUInt(IMkvReader* pReader, long long pos, long long size) {
+ if (!pReader || pos < 0 || (size <= 0) || (size > 8))
+ return E_FILE_FORMAT_INVALID;
+
+ long long result = 0;
+
+ for (long long i = 0; i < size; ++i) {
+ unsigned char b;
+
+ const long status = pReader->Read(pos, 1, &b);
+
+ if (status < 0)
+ return status;
+
+ result <<= 8;
+ result |= b;
+
+ ++pos;
+ }
+
+ return result;
+}
+
+long UnserializeFloat(IMkvReader* pReader, long long pos, long long size_,
+ double& result) {
+ if (!pReader || pos < 0 || ((size_ != 4) && (size_ != 8)))
+ return E_FILE_FORMAT_INVALID;
+
+ const long size = static_cast<long>(size_);
+
+ unsigned char buf[8];
+
+ const int status = pReader->Read(pos, size, buf);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 4) {
+ union {
+ float f;
+ unsigned long ff;
+ };
+
+ ff = 0;
+
+ for (int i = 0;;) {
+ ff |= buf[i];
+
+ if (++i >= 4)
+ break;
+
+ ff <<= 8;
+ }
+
+ result = f;
+ } else {
+ union {
+ double d;
+ unsigned long long dd;
+ };
+
+ dd = 0;
+
+ for (int i = 0;;) {
+ dd |= buf[i];
+
+ if (++i >= 8)
+ break;
+
+ dd <<= 8;
+ }
+
+ result = d;
+ }
+
+ if (mkvparser::isinf(result) || mkvparser::isnan(result))
+ return E_FILE_FORMAT_INVALID;
+
+ return 0;
+}
+
+long UnserializeInt(IMkvReader* pReader, long long pos, long long size,
+ long long& result_ref) {
+ if (!pReader || pos < 0 || size < 1 || size > 8)
+ return E_FILE_FORMAT_INVALID;
+
+ signed char first_byte = 0;
+ const long status = pReader->Read(pos, 1, (unsigned char*)&first_byte);
+
+ if (status < 0)
+ return status;
+
+ unsigned long long result = first_byte;
+ ++pos;
+
+ for (long i = 1; i < size; ++i) {
+ unsigned char b;
+
+ const long status = pReader->Read(pos, 1, &b);
+
+ if (status < 0)
+ return status;
+
+ result <<= 8;
+ result |= b;
+
+ ++pos;
+ }
+
+ result_ref = static_cast<long long>(result);
+ return 0;
+}
+
+long UnserializeString(IMkvReader* pReader, long long pos, long long size,
+ char*& str) {
+ delete[] str;
+ str = NULL;
+
+ if (size >= LONG_MAX || size < 0 || size > kStringElementSizeLimit)
+ return E_FILE_FORMAT_INVALID;
+
+ // +1 for '\0' terminator
+ const long required_size = static_cast<long>(size) + 1;
+
+ str = SafeArrayAlloc<char>(1, required_size);
+ if (str == NULL)
+ return E_FILE_FORMAT_INVALID;
+
+ unsigned char* const buf = reinterpret_cast<unsigned char*>(str);
+
+ const long status = pReader->Read(pos, static_cast<long>(size), buf);
+
+ if (status) {
+ delete[] str;
+ str = NULL;
+
+ return status;
+ }
+
+ str[required_size - 1] = '\0';
+ return 0;
+}
+
+long ParseElementHeader(IMkvReader* pReader, long long& pos, long long stop,
+ long long& id, long long& size) {
+ if (stop >= 0 && pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ long len;
+
+ id = ReadID(pReader, pos, len);
+
+ if (id < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume id
+
+ if (stop >= 0 && pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ size = ReadUInt(pReader, pos, len);
+
+ if (size < 0 || len < 1 || len > 8) {
+ // Invalid: Negative payload size, negative or 0 length integer, or integer
+ // larger than 64 bits (libwebm cannot handle them).
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ // Avoid rolling over pos when very close to LLONG_MAX.
+ const unsigned long long rollover_check =
+ static_cast<unsigned long long>(pos) + len;
+ if (rollover_check > LLONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume length of size
+
+ // pos now designates payload
+
+ if (stop >= 0 && pos > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0; // success
+}
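
A sketch of the typical child-element loop built on ParseElementHeader (pReader, payload_start, and payload_stop are assumptions standing in for a real caller's state):

    long long pos = payload_start;        // assumed: first child's position
    const long long stop = payload_stop;  // assumed: end of the payload
    while (pos < stop) {
      long long id, size;
      const long status = ParseElementHeader(pReader, pos, stop, id, size);
      if (status < 0)
        return status;
      // ... dispatch on |id| and read |size| bytes of payload at |pos| ...
      pos += size;  // consume payload
    }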
+
+bool Match(IMkvReader* pReader, long long& pos, unsigned long expected_id,
+ long long& val) {
+ if (!pReader || pos < 0)
+ return false;
+
+ long long total = 0;
+ long long available = 0;
+
+ const long status = pReader->Length(&total, &available);
+ if (status < 0 || (total >= 0 && available > total))
+ return false;
+
+ long len = 0;
+
+ const long long id = ReadID(pReader, pos, len);
+ if (id < 0 || (available - pos) > len)
+ return false;
+
+ if (static_cast<unsigned long>(id) != expected_id)
+ return false;
+
+ pos += len; // consume id
+
+ const long long size = ReadUInt(pReader, pos, len);
+ if (size < 0 || size > 8 || len < 1 || len > 8 || (available - pos) > len)
+ return false;
+
+ pos += len; // consume length of size of payload
+
+ val = UnserializeUInt(pReader, pos, size);
+ if (val < 0)
+ return false;
+
+ pos += size; // consume size of payload
+
+ return true;
+}
+
+bool Match(IMkvReader* pReader, long long& pos, unsigned long expected_id,
+ unsigned char*& buf, size_t& buflen) {
+ if (!pReader || pos < 0)
+ return false;
+
+ long long total = 0;
+ long long available = 0;
+
+ long status = pReader->Length(&total, &available);
+ if (status < 0 || (total >= 0 && available > total))
+ return false;
+
+ long len = 0;
+ const long long id = ReadID(pReader, pos, len);
+ if (id < 0 || (available - pos) > len)
+ return false;
+
+ if (static_cast<unsigned long>(id) != expected_id)
+ return false;
+
+ pos += len; // consume id
+
+ const long long size = ReadUInt(pReader, pos, len);
+ if (size < 0 || len <= 0 || len > 8 || (available - pos) > len)
+ return false;
+
+ unsigned long long rollover_check =
+ static_cast<unsigned long long>(pos) + len;
+ if (rollover_check > LLONG_MAX)
+ return false;
+
+ pos += len; // consume length of size of payload
+
+ rollover_check = static_cast<unsigned long long>(pos) + size;
+ if (rollover_check > LLONG_MAX)
+ return false;
+
+ if ((pos + size) > available)
+ return false;
+
+ if (size >= LONG_MAX)
+ return false;
+
+ const long buflen_ = static_cast<long>(size);
+
+ buf = SafeArrayAlloc<unsigned char>(1, buflen_);
+ if (!buf)
+ return false;
+
+ status = pReader->Read(pos, buflen_, buf);
+ if (status != 0)
+ return false;
+
+ buflen = buflen_;
+
+ pos += size; // consume size of payload
+ return true;
+}
+
+EBMLHeader::EBMLHeader() : m_docType(NULL) { Init(); }
+
+EBMLHeader::~EBMLHeader() { delete[] m_docType; }
+
+void EBMLHeader::Init() {
+ m_version = 1;
+ m_readVersion = 1;
+ m_maxIdLength = 4;
+ m_maxSizeLength = 8;
+
+ if (m_docType) {
+ delete[] m_docType;
+ m_docType = NULL;
+ }
+
+ m_docTypeVersion = 1;
+ m_docTypeReadVersion = 1;
+}
+
+long long EBMLHeader::Parse(IMkvReader* pReader, long long& pos) {
+ if (!pReader)
+ return E_FILE_FORMAT_INVALID;
+
+ long long total, available;
+
+ long status = pReader->Length(&total, &available);
+
+ if (status < 0) // error
+ return status;
+
+ pos = 0;
+
+ // Scan until we find what looks like the first byte of the EBML header.
+ const long long kMaxScanBytes = (available >= 1024) ? 1024 : available;
+ const unsigned char kEbmlByte0 = 0x1A;
+ unsigned char scan_byte = 0;
+
+ while (pos < kMaxScanBytes) {
+ status = pReader->Read(pos, 1, &scan_byte);
+
+ if (status < 0) // error
+ return status;
+ else if (status > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if (scan_byte == kEbmlByte0)
+ break;
+
+ ++pos;
+ }
+
+ long len = 0;
+ const long long ebml_id = ReadID(pReader, pos, len);
+
+ if (ebml_id == E_BUFFER_NOT_FULL)
+ return E_BUFFER_NOT_FULL;
+
+ if (len != 4 || ebml_id != libwebm::kMkvEBML)
+ return E_FILE_FORMAT_INVALID;
+
+ // Move read pos forward to the EBML header size field.
+ pos += 4;
+
+ // Read length of size field.
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return E_FILE_FORMAT_INVALID;
+ else if (result > 0) // need more data
+ return E_BUFFER_NOT_FULL;
+
+ if (len < 1 || len > 8)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && ((total - pos) < len))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((available - pos) < len)
+ return pos + len; // try again later
+
+ // Read the EBML header size.
+ result = ReadUInt(pReader, pos, len);
+
+ if (result < 0) // error
+ return result;
+
+ pos += len; // consume size field
+
+ // pos now designates start of payload
+
+ if ((total >= 0) && ((total - pos) < result))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((available - pos) < result)
+ return pos + result;
+
+ const long long end = pos + result;
+
+ Init();
+
+ while (pos < end) {
+ long long id, size;
+
+ status = ParseElementHeader(pReader, pos, end, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (id == libwebm::kMkvEBMLVersion) {
+ m_version = UnserializeUInt(pReader, pos, size);
+
+ if (m_version <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvEBMLReadVersion) {
+ m_readVersion = UnserializeUInt(pReader, pos, size);
+
+ if (m_readVersion <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvEBMLMaxIDLength) {
+ m_maxIdLength = UnserializeUInt(pReader, pos, size);
+
+ if (m_maxIdLength <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvEBMLMaxSizeLength) {
+ m_maxSizeLength = UnserializeUInt(pReader, pos, size);
+
+ if (m_maxSizeLength <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvDocType) {
+ if (m_docType)
+ return E_FILE_FORMAT_INVALID;
+
+ status = UnserializeString(pReader, pos, size, m_docType);
+
+ if (status) // error
+ return status;
+ } else if (id == libwebm::kMkvDocTypeVersion) {
+ m_docTypeVersion = UnserializeUInt(pReader, pos, size);
+
+ if (m_docTypeVersion <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvDocTypeReadVersion) {
+ m_docTypeReadVersion = UnserializeUInt(pReader, pos, size);
+
+ if (m_docTypeReadVersion <= 0)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ pos += size;
+ }
+
+ if (pos != end)
+ return E_FILE_FORMAT_INVALID;
+
+ // Make sure DocType, DocTypeReadVersion, and DocTypeVersion are valid.
+ if (m_docType == NULL || m_docTypeReadVersion <= 0 || m_docTypeVersion <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ // Make sure EBMLMaxIDLength and EBMLMaxSizeLength are valid.
+ if (m_maxIdLength <= 0 || m_maxIdLength > 4 || m_maxSizeLength <= 0 ||
+ m_maxSizeLength > 8)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0;
+}
+
+Segment::Segment(IMkvReader* pReader, long long elem_start,
+ // long long elem_size,
+ long long start, long long size)
+ : m_pReader(pReader),
+ m_element_start(elem_start),
+ // m_element_size(elem_size),
+ m_start(start),
+ m_size(size),
+ m_pos(start),
+ m_pUnknownSize(0),
+ m_pSeekHead(NULL),
+ m_pInfo(NULL),
+ m_pTracks(NULL),
+ m_pCues(NULL),
+ m_pChapters(NULL),
+ m_pTags(NULL),
+ m_clusters(NULL),
+ m_clusterCount(0),
+ m_clusterPreloadCount(0),
+ m_clusterSize(0) {}
+
+Segment::~Segment() {
+ const long count = m_clusterCount + m_clusterPreloadCount;
+
+ Cluster** i = m_clusters;
+ Cluster** j = m_clusters + count;
+
+ while (i != j) {
+ Cluster* const p = *i++;
+ delete p;
+ }
+
+ delete[] m_clusters;
+
+ delete m_pTracks;
+ delete m_pInfo;
+ delete m_pCues;
+ delete m_pChapters;
+ delete m_pTags;
+ delete m_pSeekHead;
+}
+
+long long Segment::CreateInstance(IMkvReader* pReader, long long pos,
+ Segment*& pSegment) {
+ if (pReader == NULL || pos < 0)
+ return E_PARSE_FAILED;
+
+ pSegment = NULL;
+
+ long long total, available;
+
+ const long status = pReader->Length(&total, &available);
+
+ if (status < 0) // error
+ return status;
+
+ if (available < 0)
+ return -1;
+
+ if ((total >= 0) && (available > total))
+ return -1;
+
+ // I would assume that in practice this loop would execute
+ // exactly once, but we allow for other elements (e.g. Void)
+ // to immediately follow the EBML header. This is fine for
+ // the source filter case (since the entire file is available),
+ // but in the splitter case over a network we should probably
+ // just give up early. We could for example decide only to
+ // execute this loop a maximum of, say, 10 times.
+ // TODO:
+  // There is an implied "give up early" by only parsing up
+  // to the available limit. We already do that, but only if the
+  // total file size is unknown. We could decide to always
+  // use what's available as our limit (irrespective of whether
+  // we happen to know the total file length). This would have
+  // as its sense "parse this much of the file before giving up",
+  // which is a slightly different sense from "try to parse up to
+  // 10 EBML elements before giving up".
+
+ for (;;) {
+ if ((total >= 0) && (pos >= total))
+ return E_FILE_FORMAT_INVALID;
+
+ // Read ID
+ long len;
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result) // error, or too few available bytes
+ return result;
+
+ if ((total >= 0) && ((pos + len) > total))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > available)
+ return pos + len;
+
+ const long long idpos = pos;
+ const long long id = ReadID(pReader, pos, len);
+
+ if (id < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume ID
+
+ // Read Size
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result) // error, or too few available bytes
+ return result;
+
+ if ((total >= 0) && ((pos + len) > total))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > available)
+ return pos + len;
+
+ long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return size;
+
+ pos += len; // consume length of size of element
+
+ // Pos now points to start of payload
+
+ // Handle "unknown size" for live streaming of webm files.
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (id == libwebm::kMkvSegment) {
+ if (size == unknown_size)
+ size = -1;
+
+ else if (total < 0)
+ size = -1;
+
+ else if ((pos + size) > total)
+ size = -1;
+
+ pSegment = new (std::nothrow) Segment(pReader, idpos, pos, size);
+ if (pSegment == NULL)
+ return E_PARSE_FAILED;
+
+ return 0; // success
+ }
+
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && ((pos + size) > total))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + size) > available)
+ return pos + size;
+
+ pos += size; // consume payload
+ }
+}
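
Tying the entry points together, a typical top-level parse looks like this (a hedged sketch: MkvReader is libwebm's file-backed IMkvReader from mkvparser/mkvreader.h, and error handling is abbreviated):

    #include "mkvparser/mkvparser.h"
    #include "mkvparser/mkvreader.h"

    int ParseExample(const char* path) {
      mkvparser::MkvReader reader;
      if (reader.Open(path))
        return -1;

      long long pos = 0;
      mkvparser::EBMLHeader ebml_header;
      if (ebml_header.Parse(&reader, pos))  // leaves |pos| at the Segment
        return -1;

      mkvparser::Segment* segment = NULL;
      if (mkvparser::Segment::CreateInstance(&reader, pos, segment))
        return -1;

      const long status = segment->Load();  // headers plus all clusters
      delete segment;
      return (status < 0) ? -1 : 0;
    }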
+
+long long Segment::ParseHeaders() {
+ // Outermost (level 0) segment object has been constructed,
+ // and pos designates start of payload. We need to find the
+ // inner (level 1) elements.
+ long long total, available;
+
+ const int status = m_pReader->Length(&total, &available);
+
+ if (status < 0) // error
+ return status;
+
+ if (total > 0 && available > total)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+
+ if ((segment_stop >= 0 && total >= 0 && segment_stop > total) ||
+ (segment_stop >= 0 && m_pos > segment_stop)) {
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ for (;;) {
+ if ((total >= 0) && (m_pos >= total))
+ break;
+
+ if ((segment_stop >= 0) && (m_pos >= segment_stop))
+ break;
+
+ long long pos = m_pos;
+ const long long element_start = pos;
+
+ // Avoid rolling over pos when very close to LLONG_MAX.
+ unsigned long long rollover_check = pos + 1ULL;
+ if (rollover_check > LLONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + 1) > available)
+ return (pos + 1);
+
+ long len;
+ long long result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return result;
+
+ if (result > 0) {
+ // MkvReader doesn't have enough data to satisfy this read attempt.
+ return (pos + 1);
+ }
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > available)
+ return pos + len;
+
+ const long long idpos = pos;
+ const long long id = ReadID(m_pReader, idpos, len);
+
+ if (id < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (id == libwebm::kMkvCluster)
+ break;
+
+ pos += len; // consume ID
+
+ if ((pos + 1) > available)
+ return (pos + 1);
+
+ // Read Size
+ result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return result;
+
+ if (result > 0) {
+ // MkvReader doesn't have enough data to satisfy this read attempt.
+ return (pos + 1);
+ }
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > available)
+ return pos + len;
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+
+ if (size < 0 || len < 1 || len > 8) {
+      // TODO(tomfinegan): ReadUInt should return an error when len < 1 or
+      // len > 8, instead of checking this _everywhere_.
+ return size;
+ }
+
+ pos += len; // consume length of size of element
+
+ // Avoid rolling over pos when very close to LLONG_MAX.
+ rollover_check = static_cast<unsigned long long>(pos) + size;
+ if (rollover_check > LLONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long element_size = size + pos - element_start;
+
+ // Pos now points to start of payload
+
+ if ((segment_stop >= 0) && ((pos + size) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // We read EBML elements either in total or nothing at all.
+
+ if ((pos + size) > available)
+ return pos + size;
+
+ if (id == libwebm::kMkvInfo) {
+ if (m_pInfo)
+ return E_FILE_FORMAT_INVALID;
+
+ m_pInfo = new (std::nothrow)
+ SegmentInfo(this, pos, size, element_start, element_size);
+
+ if (m_pInfo == NULL)
+ return -1;
+
+ const long status = m_pInfo->Parse();
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvTracks) {
+ if (m_pTracks)
+ return E_FILE_FORMAT_INVALID;
+
+ m_pTracks = new (std::nothrow)
+ Tracks(this, pos, size, element_start, element_size);
+
+ if (m_pTracks == NULL)
+ return -1;
+
+ const long status = m_pTracks->Parse();
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvCues) {
+ if (m_pCues == NULL) {
+ m_pCues = new (std::nothrow)
+ Cues(this, pos, size, element_start, element_size);
+
+ if (m_pCues == NULL)
+ return -1;
+ }
+ } else if (id == libwebm::kMkvSeekHead) {
+ if (m_pSeekHead == NULL) {
+ m_pSeekHead = new (std::nothrow)
+ SeekHead(this, pos, size, element_start, element_size);
+
+ if (m_pSeekHead == NULL)
+ return -1;
+
+ const long status = m_pSeekHead->Parse();
+
+ if (status)
+ return status;
+ }
+ } else if (id == libwebm::kMkvChapters) {
+ if (m_pChapters == NULL) {
+ m_pChapters = new (std::nothrow)
+ Chapters(this, pos, size, element_start, element_size);
+
+ if (m_pChapters == NULL)
+ return -1;
+
+ const long status = m_pChapters->Parse();
+
+ if (status)
+ return status;
+ }
+ } else if (id == libwebm::kMkvTags) {
+ if (m_pTags == NULL) {
+ m_pTags = new (std::nothrow)
+ Tags(this, pos, size, element_start, element_size);
+
+ if (m_pTags == NULL)
+ return -1;
+
+ const long status = m_pTags->Parse();
+
+ if (status)
+ return status;
+ }
+ }
+
+ m_pos = pos + size; // consume payload
+ }
+
+ if (segment_stop >= 0 && m_pos > segment_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if (m_pInfo == NULL) // TODO: liberalize this behavior
+ return E_FILE_FORMAT_INVALID;
+
+ if (m_pTracks == NULL)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0; // success
+}
+
+long Segment::LoadCluster(long long& pos, long& len) {
+ for (;;) {
+ const long result = DoLoadCluster(pos, len);
+
+ if (result <= 1)
+ return result;
+ }
+}
+
+long Segment::DoLoadCluster(long long& pos, long& len) {
+ if (m_pos < 0)
+ return DoLoadClusterUnknownSize(pos, len);
+
+ long long total, avail;
+
+ long status = m_pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ if (total >= 0 && avail > total)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+
+ long long cluster_off = -1; // offset relative to start of segment
+ long long cluster_size = -1; // size of cluster payload
+
+ for (;;) {
+ if ((total >= 0) && (m_pos >= total))
+ return 1; // no more clusters
+
+ if ((segment_stop >= 0) && (m_pos >= segment_stop))
+ return 1; // no more clusters
+
+ pos = m_pos;
+
+ // Read ID
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long idpos = pos;
+ const long long id = ReadID(m_pReader, idpos, len);
+
+ if (id < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume ID
+
+ // Read Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ pos += len; // consume length of size of element
+
+ // pos now points to start of payload
+
+ if (size == 0) {
+ // Missing element payload: move on.
+ m_pos = pos;
+ continue;
+ }
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if ((segment_stop >= 0) && (size != unknown_size) &&
+ ((pos + size) > segment_stop)) {
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (id == libwebm::kMkvCues) {
+ if (size == unknown_size) {
+ // Cues element of unknown size: Not supported.
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (m_pCues == NULL) {
+ const long long element_size = (pos - idpos) + size;
+
+ m_pCues = new (std::nothrow) Cues(this, pos, size, idpos, element_size);
+ if (m_pCues == NULL)
+ return -1;
+ }
+
+ m_pos = pos + size; // consume payload
+ continue;
+ }
+
+ if (id != libwebm::kMkvCluster) {
+      // Besides the Segment, libwebm allows only cluster elements of unknown
+ // size. Fail the parse upon encountering a non-cluster element reporting
+ // unknown size.
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ m_pos = pos + size; // consume payload
+ continue;
+ }
+
+ // We have a cluster.
+
+ cluster_off = idpos - m_start; // relative pos
+
+ if (size != unknown_size)
+ cluster_size = size;
+
+ break;
+ }
+
+ if (cluster_off < 0) {
+ // No cluster, die.
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ long long pos_;
+ long len_;
+
+ status = Cluster::HasBlockEntries(this, cluster_off, pos_, len_);
+
+ if (status < 0) { // error, or underflow
+ pos = pos_;
+ len = len_;
+
+ return status;
+ }
+
+ // status == 0 means "no block entries found"
+ // status > 0 means "found at least one block entry"
+
+ // TODO:
+ // The issue here is that the segment increments its own
+ // pos ptr past the most recent cluster parsed, and then
+ // starts from there to parse the next cluster. If we
+ // don't know the size of the current cluster, then we
+ // must either parse its payload (as we do below), looking
+ // for the cluster (or cues) ID to terminate the parse.
+ // This isn't really what we want: rather, we really need
+ // a way to create the curr cluster object immediately.
+ // The pity is that cluster::parse can determine its own
+ // boundary, and we largely duplicate that same logic here.
+ //
+  // Maybe we need to get rid of our look-ahead preloading
+  // in source::parse?
+  //
+  // As we're parsing the blocks in the curr cluster
+  // (in cluster::parse), we should have some way to signal
+ // to the segment that we have determined the boundary,
+ // so it can adjust its own segment::m_pos member.
+ //
+ // The problem is that we're asserting in asyncreadinit,
+ // because we adjust the pos down to the curr seek pos,
+ // and the resulting adjusted len is > 2GB. I'm suspicious
+ // that this is even correct, but even if it is, we can't
+ // be loading that much data in the cache anyway.
+
+ const long idx = m_clusterCount;
+
+ if (m_clusterPreloadCount > 0) {
+ if (idx >= m_clusterSize)
+ return E_FILE_FORMAT_INVALID;
+
+ Cluster* const pCluster = m_clusters[idx];
+ if (pCluster == NULL || pCluster->m_index >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long off = pCluster->GetPosition();
+ if (off < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (off == cluster_off) { // preloaded already
+ if (status == 0) // no entries found
+ return E_FILE_FORMAT_INVALID;
+
+ if (cluster_size >= 0)
+ pos += cluster_size;
+ else {
+ const long long element_size = pCluster->GetElementSize();
+
+ if (element_size <= 0)
+ return E_FILE_FORMAT_INVALID; // TODO: handle this case
+
+ pos = pCluster->m_element_start + element_size;
+ }
+
+ pCluster->m_index = idx; // move from preloaded to loaded
+ ++m_clusterCount;
+ --m_clusterPreloadCount;
+
+ m_pos = pos; // consume payload
+ if (segment_stop >= 0 && m_pos > segment_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0; // success
+ }
+ }
+
+ if (status == 0) { // no entries found
+ if (cluster_size >= 0)
+ pos += cluster_size;
+
+ if ((total >= 0) && (pos >= total)) {
+ m_pos = total;
+ return 1; // no more clusters
+ }
+
+ if ((segment_stop >= 0) && (pos >= segment_stop)) {
+ m_pos = segment_stop;
+ return 1; // no more clusters
+ }
+
+ m_pos = pos;
+ return 2; // try again
+ }
+
+ // status > 0 means we have an entry
+
+ Cluster* const pCluster = Cluster::Create(this, idx, cluster_off);
+ if (pCluster == NULL)
+ return -1;
+
+ if (!AppendCluster(pCluster)) {
+ delete pCluster;
+ return -1;
+ }
+
+ if (cluster_size >= 0) {
+ pos += cluster_size;
+
+ m_pos = pos;
+
+ if (segment_stop > 0 && m_pos > segment_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0;
+ }
+
+  m_pUnknownSize = pCluster;
+
+  // m_pos is negative here: it encodes the start of the payload of the
+  // cluster whose size is unknown, in case we need to come back here
+  // (the boundary is resolved later, in DoLoadClusterUnknownSize).
+  m_pos = -pos;
+
+  return 0;  // partial success, since we have a new cluster
+}
+
+long Segment::DoLoadClusterUnknownSize(long long& pos, long& len) {
+ if (m_pos >= 0 || m_pUnknownSize == NULL)
+ return E_PARSE_FAILED;
+
+ const long status = m_pUnknownSize->Parse(pos, len);
+
+ if (status < 0) // error or underflow
+ return status;
+
+ if (status == 0) // parsed a block
+ return 2; // continue parsing
+
+ const long long start = m_pUnknownSize->m_element_start;
+ const long long size = m_pUnknownSize->GetElementSize();
+
+ if (size < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ pos = start + size;
+ m_pos = pos;
+
+ m_pUnknownSize = 0;
+
+ return 2; // continue parsing
+}
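+
+// A sketch of the status protocol shared by DoLoadCluster and
+// DoLoadClusterUnknownSize above (0 = cluster loaded, 1 = no more
+// clusters, 2 = call again, < 0 = error, with E_BUFFER_NOT_FULL meaning
+// "feed the reader more data"). A hypothetical incremental driver,
+// assuming the two-argument LoadCluster overload (used in ParseNext
+// below) wraps these helpers:
+//
+//   long long pos;
+//   long len;
+//   for (;;) {
+//     const long status = pSegment->LoadCluster(pos, len);
+//     if (status == E_BUFFER_NOT_FULL)
+//       continue;  // a real app would wait for len more bytes at pos
+//     if (status < 0 || status >= 1)
+//       break;  // error, or no more clusters
+//     // status == 0: a newly loaded cluster is available via GetLast()
+//   }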
+
+bool Segment::AppendCluster(Cluster* pCluster) {
+ if (pCluster == NULL || pCluster->m_index < 0)
+ return false;
+
+ const long count = m_clusterCount + m_clusterPreloadCount;
+
+ long& size = m_clusterSize;
+ const long idx = pCluster->m_index;
+
+ if (size < count || idx != m_clusterCount)
+ return false;
+
+ if (count >= size) {
+ const long n = (size <= 0) ? 2048 : 2 * size;
+
+ Cluster** const qq = new (std::nothrow) Cluster*[n];
+ if (qq == NULL)
+ return false;
+
+ Cluster** q = qq;
+ Cluster** p = m_clusters;
+ Cluster** const pp = p + count;
+
+ while (p != pp)
+ *q++ = *p++;
+
+ delete[] m_clusters;
+
+ m_clusters = qq;
+ size = n;
+ }
+
+ if (m_clusterPreloadCount > 0) {
+ Cluster** const p = m_clusters + m_clusterCount;
+ if (*p == NULL || (*p)->m_index >= 0)
+ return false;
+
+ Cluster** q = p + m_clusterPreloadCount;
+ if (q >= (m_clusters + size))
+ return false;
+
+ for (;;) {
+ Cluster** const qq = q - 1;
+ if ((*qq)->m_index >= 0)
+ return false;
+
+ *q = *qq;
+ q = qq;
+
+ if (q == p)
+ break;
+ }
+ }
+
+ m_clusters[idx] = pCluster;
+ ++m_clusterCount;
+ return true;
+}
+
+bool Segment::PreloadCluster(Cluster* pCluster, ptrdiff_t idx) {
+ if (pCluster == NULL || pCluster->m_index >= 0 || idx < m_clusterCount)
+ return false;
+
+ const long count = m_clusterCount + m_clusterPreloadCount;
+
+ long& size = m_clusterSize;
+ if (size < count)
+ return false;
+
+ if (count >= size) {
+ const long n = (size <= 0) ? 2048 : 2 * size;
+
+ Cluster** const qq = new (std::nothrow) Cluster*[n];
+ if (qq == NULL)
+ return false;
+ Cluster** q = qq;
+
+ Cluster** p = m_clusters;
+ Cluster** const pp = p + count;
+
+ while (p != pp)
+ *q++ = *p++;
+
+ delete[] m_clusters;
+
+ m_clusters = qq;
+ size = n;
+ }
+
+ if (m_clusters == NULL)
+ return false;
+
+ Cluster** const p = m_clusters + idx;
+
+ Cluster** q = m_clusters + count;
+ if (q < p || q >= (m_clusters + size))
+ return false;
+
+ while (q > p) {
+ Cluster** const qq = q - 1;
+
+ if ((*qq)->m_index >= 0)
+ return false;
+
+ *q = *qq;
+ q = qq;
+ }
+
+ m_clusters[idx] = pCluster;
+ ++m_clusterPreloadCount;
+ return true;
+}
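+
+// Layout note for the two insertion routines above: m_clusters holds the
+// loaded clusters [0, m_clusterCount) in parse order, followed by
+// m_clusterPreloadCount preloaded clusters kept sorted by file position.
+// PreloadCluster shifts the preloaded tail right by one slot so the new
+// cluster lands at idx, e.g. (hypothetical positions):
+//
+//   before: L0 L1 | P@500 P@900         insert P@700 at idx == 3
+//   after : L0 L1 | P@500 P@700 P@900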
+
+long Segment::Load() {
+ if (m_clusters != NULL || m_clusterSize != 0 || m_clusterCount != 0)
+ return E_PARSE_FAILED;
+
+ // Outermost (level 0) segment object has been constructed,
+ // and pos designates start of payload. We need to find the
+ // inner (level 1) elements.
+
+ const long long header_status = ParseHeaders();
+
+ if (header_status < 0) // error
+ return static_cast<long>(header_status);
+
+ if (header_status > 0) // underflow
+ return E_BUFFER_NOT_FULL;
+
+ if (m_pInfo == NULL || m_pTracks == NULL)
+ return E_FILE_FORMAT_INVALID;
+
+ for (;;) {
+ const long status = LoadCluster();
+
+ if (status < 0) // error
+ return status;
+
+ if (status >= 1) // no more clusters
+ return 0;
+ }
+}
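+
+// A sketch of driving the blocking loader above, assuming the companion
+// classes declared elsewhere in libwebm (MkvReader, EBMLHeader,
+// Segment::CreateInstance) and a local file named input.webm:
+//
+//   MkvReader reader;
+//   if (reader.Open("input.webm"))
+//     return;
+//   long long pos = 0;
+//   EBMLHeader ebml;
+//   if (ebml.Parse(&reader, pos) < 0)  // pos now designates the segment
+//     return;
+//   Segment* pSegment;
+//   if (Segment::CreateInstance(&reader, pos, pSegment) != 0)
+//     return;
+//   if (pSegment->Load() >= 0) {
+//     // ... iterate clusters via GetFirst()/GetNext() ...
+//   }
+//   delete pSegment;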
+
+SeekHead::Entry::Entry() : id(0), pos(0), element_start(0), element_size(0) {}
+
+SeekHead::SeekHead(Segment* pSegment, long long start, long long size_,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(start),
+ m_size(size_),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_entries(0),
+ m_entry_count(0),
+ m_void_elements(0),
+ m_void_element_count(0) {}
+
+SeekHead::~SeekHead() {
+ delete[] m_entries;
+ delete[] m_void_elements;
+}
+
+long SeekHead::Parse() {
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long pos = m_start;
+ const long long stop = m_start + m_size;
+
+ // first count the seek head entries
+
+ int entry_count = 0;
+ int void_element_count = 0;
+
+ while (pos < stop) {
+ long long id, size;
+
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvSeek)
+ ++entry_count;
+ else if (id == libwebm::kMkvVoid)
+ ++void_element_count;
+
+ pos += size; // consume payload
+
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if (entry_count > 0) {
+ m_entries = new (std::nothrow) Entry[entry_count];
+
+ if (m_entries == NULL)
+ return -1;
+ }
+
+ if (void_element_count > 0) {
+ m_void_elements = new (std::nothrow) VoidElement[void_element_count];
+
+ if (m_void_elements == NULL)
+ return -1;
+ }
+
+ // now parse the entries and void elements
+
+ Entry* pEntry = m_entries;
+ VoidElement* pVoidElement = m_void_elements;
+
+ pos = m_start;
+
+ while (pos < stop) {
+ const long long idpos = pos;
+
+ long long id, size;
+
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvSeek && entry_count > 0) {
+ if (ParseEntry(pReader, pos, size, pEntry)) {
+ Entry& e = *pEntry++;
+
+ e.element_start = idpos;
+ e.element_size = (pos + size) - idpos;
+ }
+ } else if (id == libwebm::kMkvVoid && void_element_count > 0) {
+ VoidElement& e = *pVoidElement++;
+
+ e.element_start = idpos;
+ e.element_size = (pos + size) - idpos;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ ptrdiff_t count_ = ptrdiff_t(pEntry - m_entries);
+ assert(count_ >= 0);
+ assert(count_ <= entry_count);
+
+ m_entry_count = static_cast<int>(count_);
+
+ count_ = ptrdiff_t(pVoidElement - m_void_elements);
+ assert(count_ >= 0);
+ assert(count_ <= void_element_count);
+
+ m_void_element_count = static_cast<int>(count_);
+
+ return 0;
+}
+
+int SeekHead::GetCount() const { return m_entry_count; }
+
+const SeekHead::Entry* SeekHead::GetEntry(int idx) const {
+ if (idx < 0)
+ return 0;
+
+ if (idx >= m_entry_count)
+ return 0;
+
+ return m_entries + idx;
+}
+
+int SeekHead::GetVoidElementCount() const { return m_void_element_count; }
+
+const SeekHead::VoidElement* SeekHead::GetVoidElement(int idx) const {
+ if (idx < 0)
+ return 0;
+
+ if (idx >= m_void_element_count)
+ return 0;
+
+ return m_void_elements + idx;
+}
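+
+// Illustrative use of the SeekHead accessors above (not part of the
+// parser): report where each indexed level-1 element lives. Entry::pos
+// is segment-relative; ParseCues below adds m_start to form an absolute
+// file position.
+//
+//   const SeekHead* const pSH = pSegment->GetSeekHead();
+//   for (int i = 0; pSH && i < pSH->GetCount(); ++i) {
+//     const SeekHead::Entry* const e = pSH->GetEntry(i);
+//     printf("id=0x%llx pos=%lld\n", (unsigned long long)e->id, e->pos);
+//   }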
+
+long Segment::ParseCues(long long off, long long& pos, long& len) {
+ if (m_pCues)
+ return 0; // success
+
+ if (off < 0)
+ return -1;
+
+ long long total, avail;
+
+ const int status = m_pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ pos = m_start + off;
+
+ if ((total < 0) || (pos >= total))
+ return 1; // don't bother parsing cues
+
+ const long long element_start = pos;
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+  if (result > 0) {  // underflow (weird)
+    len = 1;
+    return E_BUFFER_NOT_FULL;
+  }
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long idpos = pos;
+
+ const long long id = ReadID(m_pReader, idpos, len);
+
+ if (id != libwebm::kMkvCues)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume ID
+ assert((segment_stop < 0) || (pos <= segment_stop));
+
+ // Read Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+  if (result > 0) {  // underflow (weird)
+    len = 1;
+    return E_BUFFER_NOT_FULL;
+  }
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ if (size == 0) // weird, although technically not illegal
+ return 1; // done
+
+ pos += len; // consume length of size of element
+ assert((segment_stop < 0) || (pos <= segment_stop));
+
+ // Pos now points to start of payload
+
+ const long long element_stop = pos + size;
+
+ if ((segment_stop >= 0) && (element_stop > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && (element_stop > total))
+ return 1; // don't bother parsing anymore
+
+ len = static_cast<long>(size);
+
+ if (element_stop > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long element_size = element_stop - element_start;
+
+ m_pCues =
+ new (std::nothrow) Cues(this, pos, size, element_start, element_size);
+ if (m_pCues == NULL)
+ return -1;
+
+ return 0; // success
+}
+
+bool SeekHead::ParseEntry(IMkvReader* pReader, long long start, long long size_,
+ Entry* pEntry) {
+ if (size_ <= 0)
+ return false;
+
+ long long pos = start;
+ const long long stop = start + size_;
+
+ long len;
+
+ // parse the container for the level-1 element ID
+
+ const long long seekIdId = ReadID(pReader, pos, len);
+ if (seekIdId < 0)
+ return false;
+
+ if (seekIdId != libwebm::kMkvSeekID)
+ return false;
+
+ if ((pos + len) > stop)
+ return false;
+
+ pos += len; // consume SeekID id
+
+ const long long seekIdSize = ReadUInt(pReader, pos, len);
+
+ if (seekIdSize <= 0)
+ return false;
+
+ if ((pos + len) > stop)
+ return false;
+
+ pos += len; // consume size of field
+
+ if ((pos + seekIdSize) > stop)
+ return false;
+
+ pEntry->id = ReadID(pReader, pos, len); // payload
+
+ if (pEntry->id <= 0)
+ return false;
+
+ if (len != seekIdSize)
+ return false;
+
+ pos += seekIdSize; // consume SeekID payload
+
+ const long long seekPosId = ReadID(pReader, pos, len);
+
+ if (seekPosId != libwebm::kMkvSeekPosition)
+ return false;
+
+ if ((pos + len) > stop)
+ return false;
+
+ pos += len; // consume id
+
+ const long long seekPosSize = ReadUInt(pReader, pos, len);
+
+ if (seekPosSize <= 0)
+ return false;
+
+ if ((pos + len) > stop)
+ return false;
+
+ pos += len; // consume size
+
+ if ((pos + seekPosSize) > stop)
+ return false;
+
+ pEntry->pos = UnserializeUInt(pReader, pos, seekPosSize);
+
+ if (pEntry->pos < 0)
+ return false;
+
+ pos += seekPosSize; // consume payload
+
+ if (pos != stop)
+ return false;
+
+ return true;
+}
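+
+// Layout of the Seek entry payload parsed above: two child elements, in
+// this order, each encoded as [id] [size] [payload]:
+//
+//   SeekID        payload: the EBML ID of the indexed level-1 element
+//   SeekPosition  payload: its offset, relative to the segment payload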
+
+Cues::Cues(Segment* pSegment, long long start_, long long size_,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(start_),
+ m_size(size_),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_cue_points(NULL),
+ m_count(0),
+ m_preload_count(0),
+ m_pos(start_) {}
+
+Cues::~Cues() {
+ const long n = m_count + m_preload_count;
+
+ CuePoint** p = m_cue_points;
+ CuePoint** const q = p + n;
+
+ while (p != q) {
+ CuePoint* const pCP = *p++;
+ assert(pCP);
+
+ delete pCP;
+ }
+
+ delete[] m_cue_points;
+}
+
+long Cues::GetCount() const {
+ if (m_cue_points == NULL)
+ return -1;
+
+ return m_count; // TODO: really ignore preload count?
+}
+
+bool Cues::DoneParsing() const {
+ const long long stop = m_start + m_size;
+ return (m_pos >= stop);
+}
+
+bool Cues::Init() const {
+ if (m_cue_points)
+ return true;
+
+ if (m_count != 0 || m_preload_count != 0)
+ return false;
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ const long long stop = m_start + m_size;
+ long long pos = m_start;
+
+ long cue_points_size = 0;
+
+ while (pos < stop) {
+ const long long idpos = pos;
+
+ long len;
+
+ const long long id = ReadID(pReader, pos, len);
+ if (id < 0 || (pos + len) > stop) {
+ return false;
+ }
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ if (size < 0 || (pos + len > stop)) {
+ return false;
+ }
+
+ pos += len; // consume Size field
+ if (pos + size > stop) {
+ return false;
+ }
+
+ if (id == libwebm::kMkvCuePoint) {
+ if (!PreloadCuePoint(cue_points_size, idpos))
+ return false;
+ }
+
+ pos += size; // skip payload
+ }
+ return true;
+}
+
+bool Cues::PreloadCuePoint(long& cue_points_size, long long pos) const {
+ if (m_count != 0)
+ return false;
+
+ if (m_preload_count >= cue_points_size) {
+ const long n = (cue_points_size <= 0) ? 2048 : 2 * cue_points_size;
+
+ CuePoint** const qq = new (std::nothrow) CuePoint*[n];
+ if (qq == NULL)
+ return false;
+
+ CuePoint** q = qq; // beginning of target
+
+ CuePoint** p = m_cue_points; // beginning of source
+ CuePoint** const pp = p + m_preload_count; // end of source
+
+ while (p != pp)
+ *q++ = *p++;
+
+ delete[] m_cue_points;
+
+ m_cue_points = qq;
+ cue_points_size = n;
+ }
+
+ CuePoint* const pCP = new (std::nothrow) CuePoint(m_preload_count, pos);
+ if (pCP == NULL)
+ return false;
+
+ m_cue_points[m_preload_count++] = pCP;
+ return true;
+}
+
+bool Cues::LoadCuePoint() const {
+ const long long stop = m_start + m_size;
+
+ if (m_pos >= stop)
+ return false; // nothing else to do
+
+ if (!Init()) {
+ m_pos = stop;
+ return false;
+ }
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ while (m_pos < stop) {
+ const long long idpos = m_pos;
+
+ long len;
+
+ const long long id = ReadID(pReader, m_pos, len);
+ if (id < 0 || (m_pos + len) > stop)
+ return false;
+
+ m_pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, m_pos, len);
+ if (size < 0 || (m_pos + len) > stop)
+ return false;
+
+ m_pos += len; // consume Size field
+ if ((m_pos + size) > stop)
+ return false;
+
+ if (id != libwebm::kMkvCuePoint) {
+ m_pos += size; // consume payload
+ if (m_pos > stop)
+ return false;
+
+ continue;
+ }
+
+ if (m_preload_count < 1)
+ return false;
+
+ CuePoint* const pCP = m_cue_points[m_count];
+ if (!pCP || (pCP->GetTimeCode() < 0 && (-pCP->GetTimeCode() != idpos)))
+ return false;
+
+ if (!pCP->Load(pReader)) {
+ m_pos = stop;
+ return false;
+ }
+ ++m_count;
+ --m_preload_count;
+
+ m_pos += size; // consume payload
+ if (m_pos > stop)
+ return false;
+
+ return true; // yes, we loaded a cue point
+ }
+
+ return false; // no, we did not load a cue point
+}
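+
+// Cue points are parsed lazily. A sketch of forcing them all in at once,
+// assuming the whole file is available (no E_BUFFER_NOT_FULL handling):
+//
+//   const Cues* const pCues = pSegment->GetCues();
+//   if (pCues) {
+//     while (!pCues->DoneParsing())
+//       pCues->LoadCuePoint();
+//   }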
+
+bool Cues::Find(long long time_ns, const Track* pTrack, const CuePoint*& pCP,
+ const CuePoint::TrackPosition*& pTP) const {
+ if (time_ns < 0 || pTrack == NULL || m_cue_points == NULL || m_count == 0)
+ return false;
+
+ CuePoint** const ii = m_cue_points;
+ CuePoint** i = ii;
+
+ CuePoint** const jj = ii + m_count;
+ CuePoint** j = jj;
+
+ pCP = *i;
+ if (pCP == NULL)
+ return false;
+
+ if (time_ns <= pCP->GetTime(m_pSegment)) {
+ pTP = pCP->Find(pTrack);
+ return (pTP != NULL);
+ }
+
+ while (i < j) {
+    // INVARIANT:
+    // [ii, i) <= time_ns
+    // [i, j)  ?
+    // [j, jj) > time_ns
+
+ CuePoint** const k = i + (j - i) / 2;
+ if (k >= jj)
+ return false;
+
+ CuePoint* const pCP = *k;
+ if (pCP == NULL)
+ return false;
+
+ const long long t = pCP->GetTime(m_pSegment);
+
+ if (t <= time_ns)
+ i = k + 1;
+ else
+ j = k;
+
+ if (i > j)
+ return false;
+ }
+
+ if (i != j || i > jj || i <= ii)
+ return false;
+
+ pCP = *--i;
+
+ if (pCP == NULL || pCP->GetTime(m_pSegment) > time_ns)
+ return false;
+
+ // TODO: here and elsewhere, it's probably not correct to search
+ // for the cue point with this time, and then search for a matching
+ // track. In principle, the matching track could be on some earlier
+ // cue point, and with our current algorithm, we'd miss it. To make
+ // this bullet-proof, we'd need to create a secondary structure,
+ // with a list of cue points that apply to a track, and then search
+ // that track-based structure for a matching cue point.
+
+ pTP = pCP->Find(pTrack);
+ return (pTP != NULL);
+}
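+
+// A sketch of a cue-based seek using Find above, assuming pCues is the
+// segment's loaded Cues object, pTrack came from Segment::GetTracks(),
+// and the cue points are already loaded:
+//
+//   const CuePoint* pCP;
+//   const CuePoint::TrackPosition* pTP;
+//   if (pCues->Find(time_ns, pTrack, pCP, pTP)) {
+//     const BlockEntry* const pBE = pCues->GetBlock(pCP, pTP);
+//     // if non-NULL, pBE is the entry to start decoding from
+//   }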
+
+const CuePoint* Cues::GetFirst() const {
+ if (m_cue_points == NULL || m_count == 0)
+ return NULL;
+
+ CuePoint* const* const pp = m_cue_points;
+ if (pp == NULL)
+ return NULL;
+
+ CuePoint* const pCP = pp[0];
+ if (pCP == NULL || pCP->GetTimeCode() < 0)
+ return NULL;
+
+ return pCP;
+}
+
+const CuePoint* Cues::GetLast() const {
+ if (m_cue_points == NULL || m_count <= 0)
+ return NULL;
+
+ const long index = m_count - 1;
+
+ CuePoint* const* const pp = m_cue_points;
+ if (pp == NULL)
+ return NULL;
+
+ CuePoint* const pCP = pp[index];
+ if (pCP == NULL || pCP->GetTimeCode() < 0)
+ return NULL;
+
+ return pCP;
+}
+
+const CuePoint* Cues::GetNext(const CuePoint* pCurr) const {
+ if (pCurr == NULL || pCurr->GetTimeCode() < 0 || m_cue_points == NULL ||
+ m_count < 1) {
+ return NULL;
+ }
+
+ long index = pCurr->m_index;
+ if (index >= m_count)
+ return NULL;
+
+ CuePoint* const* const pp = m_cue_points;
+ if (pp == NULL || pp[index] != pCurr)
+ return NULL;
+
+ ++index;
+
+ if (index >= m_count)
+ return NULL;
+
+ CuePoint* const pNext = pp[index];
+
+ if (pNext == NULL || pNext->GetTimeCode() < 0)
+ return NULL;
+
+ return pNext;
+}
+
+const BlockEntry* Cues::GetBlock(const CuePoint* pCP,
+ const CuePoint::TrackPosition* pTP) const {
+ if (pCP == NULL || pTP == NULL)
+ return NULL;
+
+ return m_pSegment->GetBlock(*pCP, *pTP);
+}
+
+const BlockEntry* Segment::GetBlock(const CuePoint& cp,
+ const CuePoint::TrackPosition& tp) {
+ Cluster** const ii = m_clusters;
+ Cluster** i = ii;
+
+ const long count = m_clusterCount + m_clusterPreloadCount;
+
+ Cluster** const jj = ii + count;
+ Cluster** j = jj;
+
+ while (i < j) {
+    // INVARIANT:
+    // [ii, i) < tp.m_pos
+    // [i, j)  ?
+    // [j, jj) > tp.m_pos
+
+ Cluster** const k = i + (j - i) / 2;
+ assert(k < jj);
+
+ Cluster* const pCluster = *k;
+ assert(pCluster);
+
+ const long long pos = pCluster->GetPosition();
+ assert(pos >= 0);
+
+ if (pos < tp.m_pos)
+ i = k + 1;
+ else if (pos > tp.m_pos)
+ j = k;
+ else
+ return pCluster->GetEntry(cp, tp);
+ }
+
+ assert(i == j);
+
+ Cluster* const pCluster = Cluster::Create(this, -1, tp.m_pos); //, -1);
+ if (pCluster == NULL)
+ return NULL;
+
+ const ptrdiff_t idx = i - m_clusters;
+
+ if (!PreloadCluster(pCluster, idx)) {
+ delete pCluster;
+ return NULL;
+ }
+ assert(m_clusters);
+ assert(m_clusterPreloadCount > 0);
+ assert(m_clusters[idx] == pCluster);
+
+ return pCluster->GetEntry(cp, tp);
+}
+
+const Cluster* Segment::FindOrPreloadCluster(long long requested_pos) {
+ if (requested_pos < 0)
+ return 0;
+
+ Cluster** const ii = m_clusters;
+ Cluster** i = ii;
+
+ const long count = m_clusterCount + m_clusterPreloadCount;
+
+ Cluster** const jj = ii + count;
+ Cluster** j = jj;
+
+ while (i < j) {
+    // INVARIANT:
+    // [ii, i) < requested_pos
+    // [i, j)  ?
+    // [j, jj) > requested_pos
+
+ Cluster** const k = i + (j - i) / 2;
+ assert(k < jj);
+
+ Cluster* const pCluster = *k;
+ assert(pCluster);
+
+ const long long pos = pCluster->GetPosition();
+ assert(pos >= 0);
+
+ if (pos < requested_pos)
+ i = k + 1;
+ else if (pos > requested_pos)
+ j = k;
+ else
+ return pCluster;
+ }
+
+ assert(i == j);
+
+ Cluster* const pCluster = Cluster::Create(this, -1, requested_pos);
+ if (pCluster == NULL)
+ return NULL;
+
+ const ptrdiff_t idx = i - m_clusters;
+
+ if (!PreloadCluster(pCluster, idx)) {
+ delete pCluster;
+ return NULL;
+ }
+ assert(m_clusters);
+ assert(m_clusterPreloadCount > 0);
+ assert(m_clusters[idx] == pCluster);
+
+ return pCluster;
+}
+
+CuePoint::CuePoint(long idx, long long pos)
+ : m_element_start(0),
+ m_element_size(0),
+ m_index(idx),
+ m_timecode(-1 * pos),
+ m_track_positions(NULL),
+ m_track_positions_count(0) {
+ assert(pos > 0);
+}
+
+CuePoint::~CuePoint() { delete[] m_track_positions; }
+
+bool CuePoint::Load(IMkvReader* pReader) {
+ if (m_timecode >= 0) // already loaded
+ return true;
+
+ assert(m_track_positions == NULL);
+ assert(m_track_positions_count == 0);
+
+ long long pos_ = -m_timecode;
+ const long long element_start = pos_;
+
+ long long stop;
+
+ {
+ long len;
+
+ const long long id = ReadID(pReader, pos_, len);
+ if (id != libwebm::kMkvCuePoint)
+ return false;
+
+ pos_ += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos_, len);
+ assert(size >= 0);
+
+ pos_ += len; // consume Size field
+ // pos_ now points to start of payload
+
+ stop = pos_ + size;
+ }
+
+ const long long element_size = stop - element_start;
+
+ long long pos = pos_;
+
+ // First count number of track positions
+
+ while (pos < stop) {
+ long len;
+
+ const long long id = ReadID(pReader, pos, len);
+ if ((id < 0) || (pos + len > stop)) {
+ return false;
+ }
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ if ((size < 0) || (pos + len > stop)) {
+ return false;
+ }
+
+ pos += len; // consume Size field
+ if ((pos + size) > stop) {
+ return false;
+ }
+
+ if (id == libwebm::kMkvCueTime)
+ m_timecode = UnserializeUInt(pReader, pos, size);
+
+ else if (id == libwebm::kMkvCueTrackPositions)
+ ++m_track_positions_count;
+
+ pos += size; // consume payload
+ }
+
+ if (m_timecode < 0 || m_track_positions_count <= 0) {
+ return false;
+ }
+
+ m_track_positions = new (std::nothrow) TrackPosition[m_track_positions_count];
+ if (m_track_positions == NULL)
+ return false;
+
+ // Now parse track positions
+
+ TrackPosition* p = m_track_positions;
+ pos = pos_;
+
+ while (pos < stop) {
+ long len;
+
+ const long long id = ReadID(pReader, pos, len);
+ if (id < 0 || (pos + len) > stop)
+ return false;
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ assert(size >= 0);
+ assert((pos + len) <= stop);
+
+ pos += len; // consume Size field
+ assert((pos + size) <= stop);
+
+ if (id == libwebm::kMkvCueTrackPositions) {
+ TrackPosition& tp = *p++;
+ if (!tp.Parse(pReader, pos, size)) {
+ return false;
+ }
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return false;
+ }
+
+ assert(size_t(p - m_track_positions) == m_track_positions_count);
+
+ m_element_start = element_start;
+ m_element_size = element_size;
+
+ return true;
+}
+
+bool CuePoint::TrackPosition::Parse(IMkvReader* pReader, long long start_,
+ long long size_) {
+ const long long stop = start_ + size_;
+ long long pos = start_;
+
+ m_track = -1;
+ m_pos = -1;
+ m_block = 1; // default
+
+ while (pos < stop) {
+ long len;
+
+ const long long id = ReadID(pReader, pos, len);
+ if ((id < 0) || ((pos + len) > stop)) {
+ return false;
+ }
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ if ((size < 0) || ((pos + len) > stop)) {
+ return false;
+ }
+
+ pos += len; // consume Size field
+ if ((pos + size) > stop) {
+ return false;
+ }
+
+ if (id == libwebm::kMkvCueTrack)
+ m_track = UnserializeUInt(pReader, pos, size);
+ else if (id == libwebm::kMkvCueClusterPosition)
+ m_pos = UnserializeUInt(pReader, pos, size);
+ else if (id == libwebm::kMkvCueBlockNumber)
+ m_block = UnserializeUInt(pReader, pos, size);
+
+ pos += size; // consume payload
+ }
+
+ if ((m_pos < 0) || (m_track <= 0)) {
+ return false;
+ }
+
+ return true;
+}
+
+const CuePoint::TrackPosition* CuePoint::Find(const Track* pTrack) const {
+ if (pTrack == NULL) {
+ return NULL;
+ }
+
+ const long long n = pTrack->GetNumber();
+
+ const TrackPosition* i = m_track_positions;
+ const TrackPosition* const j = i + m_track_positions_count;
+
+ while (i != j) {
+ const TrackPosition& p = *i++;
+
+ if (p.m_track == n)
+ return &p;
+ }
+
+ return NULL; // no matching track number found
+}
+
+long long CuePoint::GetTimeCode() const { return m_timecode; }
+
+long long CuePoint::GetTime(const Segment* pSegment) const {
+ assert(pSegment);
+ assert(m_timecode >= 0);
+
+ const SegmentInfo* const pInfo = pSegment->GetInfo();
+ assert(pInfo);
+
+ const long long scale = pInfo->GetTimeCodeScale();
+ assert(scale >= 1);
+
+ const long long time = scale * m_timecode;
+
+ return time;
+}
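+
+// Worked example of the conversion above: with the default TimecodeScale
+// of 1,000,000 ns per tick (the default set in SegmentInfo::Parse below),
+// a cue timecode of 2500 yields 2500 * 1,000,000 = 2,500,000,000 ns,
+// i.e. 2.5 seconds.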
+
+bool Segment::DoneParsing() const {
+ if (m_size < 0) {
+ long long total, avail;
+
+ const int status = m_pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return true; // must assume done
+
+ if (total < 0)
+ return false; // assume live stream
+
+ return (m_pos >= total);
+ }
+
+ const long long stop = m_start + m_size;
+
+ return (m_pos >= stop);
+}
+
+const Cluster* Segment::GetFirst() const {
+ if ((m_clusters == NULL) || (m_clusterCount <= 0))
+ return &m_eos;
+
+ Cluster* const pCluster = m_clusters[0];
+ assert(pCluster);
+
+ return pCluster;
+}
+
+const Cluster* Segment::GetLast() const {
+ if ((m_clusters == NULL) || (m_clusterCount <= 0))
+ return &m_eos;
+
+ const long idx = m_clusterCount - 1;
+
+ Cluster* const pCluster = m_clusters[idx];
+ assert(pCluster);
+
+ return pCluster;
+}
+
+unsigned long Segment::GetCount() const { return m_clusterCount; }
+
+const Cluster* Segment::GetNext(const Cluster* pCurr) {
+ assert(pCurr);
+ assert(pCurr != &m_eos);
+ assert(m_clusters);
+
+ long idx = pCurr->m_index;
+
+ if (idx >= 0) {
+ assert(m_clusterCount > 0);
+ assert(idx < m_clusterCount);
+ assert(pCurr == m_clusters[idx]);
+
+ ++idx;
+
+ if (idx >= m_clusterCount)
+ return &m_eos; // caller will LoadCluster as desired
+
+ Cluster* const pNext = m_clusters[idx];
+ assert(pNext);
+ assert(pNext->m_index >= 0);
+ assert(pNext->m_index == idx);
+
+ return pNext;
+ }
+
+ assert(m_clusterPreloadCount > 0);
+
+ long long pos = pCurr->m_element_start;
+
+ assert(m_size >= 0); // TODO
+ const long long stop = m_start + m_size; // end of segment
+
+ {
+ long len;
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+ assert(result == 0);
+ assert((pos + len) <= stop); // TODO
+ if (result != 0)
+ return NULL;
+
+ const long long id = ReadID(m_pReader, pos, len);
+ if (id != libwebm::kMkvCluster)
+ return NULL;
+
+ pos += len; // consume ID
+
+ // Read Size
+ result = GetUIntLength(m_pReader, pos, len);
+ assert(result == 0); // TODO
+ assert((pos + len) <= stop); // TODO
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+ assert(size > 0); // TODO
+
+ pos += len; // consume length of size of element
+ assert((pos + size) <= stop); // TODO
+
+ // Pos now points to start of payload
+
+ pos += size; // consume payload
+ }
+
+ long long off_next = 0;
+
+ while (pos < stop) {
+ long len;
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+ assert(result == 0);
+ assert((pos + len) <= stop); // TODO
+ if (result != 0)
+ return NULL;
+
+ const long long idpos = pos; // pos of next (potential) cluster
+
+ const long long id = ReadID(m_pReader, idpos, len);
+ if (id < 0)
+ return NULL;
+
+ pos += len; // consume ID
+
+ // Read Size
+ result = GetUIntLength(m_pReader, pos, len);
+ assert(result == 0); // TODO
+ assert((pos + len) <= stop); // TODO
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+ assert(size >= 0); // TODO
+
+ pos += len; // consume length of size of element
+ assert((pos + size) <= stop); // TODO
+
+ // Pos now points to start of payload
+
+ if (size == 0) // weird
+ continue;
+
+ if (id == libwebm::kMkvCluster) {
+ const long long off_next_ = idpos - m_start;
+
+ long long pos_;
+ long len_;
+
+ const long status = Cluster::HasBlockEntries(this, off_next_, pos_, len_);
+
+ assert(status >= 0);
+
+ if (status > 0) {
+ off_next = off_next_;
+ break;
+ }
+ }
+
+ pos += size; // consume payload
+ }
+
+ if (off_next <= 0)
+ return 0;
+
+ Cluster** const ii = m_clusters + m_clusterCount;
+ Cluster** i = ii;
+
+ Cluster** const jj = ii + m_clusterPreloadCount;
+ Cluster** j = jj;
+
+ while (i < j) {
+    // INVARIANT:
+    // [0, i)  < off_next
+    // [i, j)  ?
+    // [j, jj) > off_next
+
+ Cluster** const k = i + (j - i) / 2;
+ assert(k < jj);
+
+ Cluster* const pNext = *k;
+ assert(pNext);
+ assert(pNext->m_index < 0);
+
+ pos = pNext->GetPosition();
+
+ if (pos < off_next)
+ i = k + 1;
+ else if (pos > off_next)
+ j = k;
+ else
+ return pNext;
+ }
+
+ assert(i == j);
+
+ Cluster* const pNext = Cluster::Create(this, -1, off_next);
+ if (pNext == NULL)
+ return NULL;
+
+ const ptrdiff_t idx_next = i - m_clusters; // insertion position
+
+ if (!PreloadCluster(pNext, idx_next)) {
+ delete pNext;
+ return NULL;
+ }
+ assert(m_clusters);
+ assert(idx_next < m_clusterSize);
+ assert(m_clusters[idx_next] == pNext);
+
+ return pNext;
+}
+
+long Segment::ParseNext(const Cluster* pCurr, const Cluster*& pResult,
+ long long& pos, long& len) {
+ assert(pCurr);
+ assert(!pCurr->EOS());
+ assert(m_clusters);
+
+ pResult = 0;
+
+ if (pCurr->m_index >= 0) { // loaded (not merely preloaded)
+ assert(m_clusters[pCurr->m_index] == pCurr);
+
+ const long next_idx = pCurr->m_index + 1;
+
+ if (next_idx < m_clusterCount) {
+ pResult = m_clusters[next_idx];
+ return 0; // success
+ }
+
+ // curr cluster is last among loaded
+
+ const long result = LoadCluster(pos, len);
+
+ if (result < 0) // error or underflow
+ return result;
+
+    if (result > 0) {  // no more clusters
+      // pResult = &m_eos;
+      return 1;
+    }
+
+ pResult = GetLast();
+ return 0; // success
+ }
+
+ assert(m_pos > 0);
+
+ long long total, avail;
+
+ long status = m_pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+
+ // interrogate curr cluster
+
+ pos = pCurr->m_element_start;
+
+ if (pCurr->m_element_size >= 0)
+ pos += pCurr->m_element_size;
+ else {
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadUInt(m_pReader, pos, len);
+
+ if (id != libwebm::kMkvCluster)
+ return -1;
+
+ pos += len; // consume ID
+
+ // Read Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ pos += len; // consume size field
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size == unknown_size) // TODO: should never happen
+ return E_FILE_FORMAT_INVALID; // TODO: resolve this
+
+
+ if ((segment_stop >= 0) && ((pos + size) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // Pos now points to start of payload
+
+ pos += size; // consume payload (that is, the current cluster)
+ if (segment_stop >= 0 && pos > segment_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ // By consuming the payload, we are assuming that the curr
+ // cluster isn't interesting. That is, we don't bother checking
+ // whether the payload of the curr cluster is less than what
+ // happens to be available (obtained via IMkvReader::Length).
+ // Presumably the caller has already dispensed with the current
+ // cluster, and really does want the next cluster.
+ }
+
+ // pos now points to just beyond the last fully-loaded cluster
+
+ for (;;) {
+ const long status = DoParseNext(pResult, pos, len);
+
+ if (status <= 1)
+ return status;
+ }
+}
+
+long Segment::DoParseNext(const Cluster*& pResult, long long& pos, long& len) {
+ long long total, avail;
+
+ long status = m_pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ const long long segment_stop = (m_size < 0) ? -1 : m_start + m_size;
+
+ // Parse next cluster. This is strictly a parsing activity.
+ // Creation of a new cluster object happens later, after the
+ // parsing is done.
+
+ long long off_next = 0;
+ long long cluster_size = -1;
+
+ for (;;) {
+ if ((total >= 0) && (pos >= total))
+ return 1; // EOF
+
+ if ((segment_stop >= 0) && (pos >= segment_stop))
+ return 1; // EOF
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long idpos = pos; // absolute
+ const long long idoff = pos - m_start; // relative
+
+ const long long id = ReadID(m_pReader, idpos, len); // absolute
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+ if (id == 0) // weird
+ return -1; // generic error
+
+ pos += len; // consume ID
+
+ // Read Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ pos += len; // consume length of size of element
+
+ // Pos now points to start of payload
+
+ if (size == 0) // weird
+ continue;
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if ((segment_stop >= 0) && (size != unknown_size) &&
+ ((pos + size) > segment_stop)) {
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (id == libwebm::kMkvCues) {
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long element_stop = pos + size;
+
+ if ((segment_stop >= 0) && (element_stop > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ const long long element_start = idpos;
+ const long long element_size = element_stop - element_start;
+
+ if (m_pCues == NULL) {
+ m_pCues = new (std::nothrow)
+ Cues(this, pos, size, element_start, element_size);
+        if (m_pCues == NULL)
+          return -1;  // allocation failed; false here would read as success
+ }
+
+ pos += size; // consume payload
+ if (segment_stop >= 0 && pos > segment_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ continue;
+ }
+
+ if (id != libwebm::kMkvCluster) { // not a Cluster ID
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += size; // consume payload
+ if (segment_stop >= 0 && pos > segment_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ continue;
+ }
+
+ // We have a cluster.
+ off_next = idoff;
+
+ if (size != unknown_size)
+ cluster_size = size;
+
+ break;
+ }
+
+ assert(off_next > 0); // have cluster
+
+  // We have parsed the next cluster.
+  // We have not created a cluster object yet. What we need
+  // to do now is determine whether it has already been preloaded
+  // (in which case an object for this cluster has already been
+  // created), and if not, create a new cluster object.
+
+ Cluster** const ii = m_clusters + m_clusterCount;
+ Cluster** i = ii;
+
+ Cluster** const jj = ii + m_clusterPreloadCount;
+ Cluster** j = jj;
+
+ while (i < j) {
+    // INVARIANT:
+    // [0, i)  < off_next
+    // [i, j)  ?
+    // [j, jj) > off_next
+
+ Cluster** const k = i + (j - i) / 2;
+ assert(k < jj);
+
+ const Cluster* const pNext = *k;
+ assert(pNext);
+ assert(pNext->m_index < 0);
+
+ pos = pNext->GetPosition();
+ assert(pos >= 0);
+
+ if (pos < off_next)
+ i = k + 1;
+ else if (pos > off_next)
+ j = k;
+ else {
+ pResult = pNext;
+ return 0; // success
+ }
+ }
+
+ assert(i == j);
+
+ long long pos_;
+ long len_;
+
+ status = Cluster::HasBlockEntries(this, off_next, pos_, len_);
+
+ if (status < 0) { // error or underflow
+ pos = pos_;
+ len = len_;
+
+ return status;
+ }
+
+ if (status > 0) { // means "found at least one block entry"
+ Cluster* const pNext = Cluster::Create(this,
+ -1, // preloaded
+ off_next);
+ if (pNext == NULL)
+ return -1;
+
+ const ptrdiff_t idx_next = i - m_clusters; // insertion position
+
+ if (!PreloadCluster(pNext, idx_next)) {
+ delete pNext;
+ return -1;
+ }
+ assert(m_clusters);
+ assert(idx_next < m_clusterSize);
+ assert(m_clusters[idx_next] == pNext);
+
+ pResult = pNext;
+ return 0; // success
+ }
+
+ // status == 0 means "no block entries found"
+
+ if (cluster_size < 0) { // unknown size
+ const long long payload_pos = pos; // absolute pos of cluster payload
+
+ for (;;) { // determine cluster size
+ if ((total >= 0) && (pos >= total))
+ break;
+
+ if ((segment_stop >= 0) && (pos >= segment_stop))
+ break; // no more clusters
+
+ // Read ID
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long idpos = pos;
+ const long long id = ReadID(m_pReader, idpos, len);
+
+ if (id < 0) // error (or underflow)
+ return static_cast<long>(id);
+
+      // This is the distinguished set of IDs we use to determine
+      // that we have exhausted the sub-elements inside the cluster
+      // whose ID we parsed earlier.
+
+ if (id == libwebm::kMkvCluster || id == libwebm::kMkvCues)
+ break;
+
+ pos += len; // consume ID (of sub-element)
+
+ // Read Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(m_pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(m_pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ pos += len; // consume size field of element
+
+ // pos now points to start of sub-element's payload
+
+ if (size == 0) // weird
+ continue;
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID; // not allowed for sub-elements
+
+ if ((segment_stop >= 0) && ((pos + size) > segment_stop)) // weird
+ return E_FILE_FORMAT_INVALID;
+
+ pos += size; // consume payload of sub-element
+ if (segment_stop >= 0 && pos > segment_stop)
+ return E_FILE_FORMAT_INVALID;
+ } // determine cluster size
+
+ cluster_size = pos - payload_pos;
+ assert(cluster_size >= 0); // TODO: handle cluster_size = 0
+
+ pos = payload_pos; // reset and re-parse original cluster
+ }
+
+ pos += cluster_size; // consume payload
+ if (segment_stop >= 0 && pos > segment_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ return 2; // try to find a cluster that follows next
+}
+
+const Cluster* Segment::FindCluster(long long time_ns) const {
+ if ((m_clusters == NULL) || (m_clusterCount <= 0))
+ return &m_eos;
+
+ {
+ Cluster* const pCluster = m_clusters[0];
+ assert(pCluster);
+ assert(pCluster->m_index == 0);
+
+ if (time_ns <= pCluster->GetTime())
+ return pCluster;
+ }
+
+ // Binary search of cluster array
+
+ long i = 0;
+ long j = m_clusterCount;
+
+ while (i < j) {
+    // INVARIANT:
+    // [0, i) <= time_ns
+    // [i, j)  ?
+    // [j, m_clusterCount) > time_ns
+
+ const long k = i + (j - i) / 2;
+ assert(k < m_clusterCount);
+
+ Cluster* const pCluster = m_clusters[k];
+ assert(pCluster);
+ assert(pCluster->m_index == k);
+
+ const long long t = pCluster->GetTime();
+
+ if (t <= time_ns)
+ i = k + 1;
+ else
+ j = k;
+
+ assert(i <= j);
+ }
+
+ assert(i == j);
+ assert(i > 0);
+ assert(i <= m_clusterCount);
+
+ const long k = i - 1;
+
+ Cluster* const pCluster = m_clusters[k];
+ assert(pCluster);
+ assert(pCluster->m_index == k);
+ assert(pCluster->GetTime() <= time_ns);
+
+ return pCluster;
+}
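+
+// Worked example of the search above: with clusters at times (ns)
+// [0, 2e9, 4e9] and time_ns == 3e9, the first probe (k == 1, t == 2e9)
+// sets i = 2, the second (k == 2, t == 4e9) sets j = 2, and the cluster
+// at index i - 1 == 1 is returned: the last cluster whose time does not
+// exceed the target.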
+
+const Tracks* Segment::GetTracks() const { return m_pTracks; }
+const SegmentInfo* Segment::GetInfo() const { return m_pInfo; }
+const Cues* Segment::GetCues() const { return m_pCues; }
+const Chapters* Segment::GetChapters() const { return m_pChapters; }
+const Tags* Segment::GetTags() const { return m_pTags; }
+const SeekHead* Segment::GetSeekHead() const { return m_pSeekHead; }
+
+long long Segment::GetDuration() const {
+ assert(m_pInfo);
+ return m_pInfo->GetDuration();
+}
+
+Chapters::Chapters(Segment* pSegment, long long payload_start,
+ long long payload_size, long long element_start,
+ long long element_size)
+ : m_pSegment(pSegment),
+ m_start(payload_start),
+ m_size(payload_size),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_editions(NULL),
+ m_editions_size(0),
+ m_editions_count(0) {}
+
+Chapters::~Chapters() {
+ while (m_editions_count > 0) {
+ Edition& e = m_editions[--m_editions_count];
+ e.Clear();
+ }
+ delete[] m_editions;
+}
+
+long Chapters::Parse() {
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long pos = m_start; // payload start
+ const long long stop = pos + m_size; // payload stop
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // weird
+ continue;
+
+ if (id == libwebm::kMkvEditionEntry) {
+ status = ParseEdition(pos, size);
+
+ if (status < 0) // error
+ return status;
+ }
+
+ pos += size;
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ return 0;
+}
+
+int Chapters::GetEditionCount() const { return m_editions_count; }
+
+const Chapters::Edition* Chapters::GetEdition(int idx) const {
+ if (idx < 0)
+ return NULL;
+
+ if (idx >= m_editions_count)
+ return NULL;
+
+ return m_editions + idx;
+}
+
+bool Chapters::ExpandEditionsArray() {
+ if (m_editions_size > m_editions_count)
+ return true; // nothing else to do
+
+ const int size = (m_editions_size == 0) ? 1 : 2 * m_editions_size;
+
+ Edition* const editions = new (std::nothrow) Edition[size];
+
+ if (editions == NULL)
+ return false;
+
+ for (int idx = 0; idx < m_editions_count; ++idx) {
+ m_editions[idx].ShallowCopy(editions[idx]);
+ }
+
+ delete[] m_editions;
+ m_editions = editions;
+
+ m_editions_size = size;
+ return true;
+}
+
+long Chapters::ParseEdition(long long pos, long long size) {
+ if (!ExpandEditionsArray())
+ return -1;
+
+ Edition& e = m_editions[m_editions_count++];
+ e.Init();
+
+ return e.Parse(m_pSegment->m_pReader, pos, size);
+}
+
+Chapters::Edition::Edition() {}
+
+Chapters::Edition::~Edition() {}
+
+int Chapters::Edition::GetAtomCount() const { return m_atoms_count; }
+
+const Chapters::Atom* Chapters::Edition::GetAtom(int index) const {
+ if (index < 0)
+ return NULL;
+
+ if (index >= m_atoms_count)
+ return NULL;
+
+ return m_atoms + index;
+}
+
+void Chapters::Edition::Init() {
+ m_atoms = NULL;
+ m_atoms_size = 0;
+ m_atoms_count = 0;
+}
+
+void Chapters::Edition::ShallowCopy(Edition& rhs) const {
+ rhs.m_atoms = m_atoms;
+ rhs.m_atoms_size = m_atoms_size;
+ rhs.m_atoms_count = m_atoms_count;
+}
+
+void Chapters::Edition::Clear() {
+ while (m_atoms_count > 0) {
+ Atom& a = m_atoms[--m_atoms_count];
+ a.Clear();
+ }
+
+ delete[] m_atoms;
+ m_atoms = NULL;
+
+ m_atoms_size = 0;
+}
+
+long Chapters::Edition::Parse(IMkvReader* pReader, long long pos,
+ long long size) {
+ const long long stop = pos + size;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0)
+ continue;
+
+ if (id == libwebm::kMkvChapterAtom) {
+ status = ParseAtom(pReader, pos, size);
+
+ if (status < 0) // error
+ return status;
+ }
+
+ pos += size;
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ return 0;
+}
+
+long Chapters::Edition::ParseAtom(IMkvReader* pReader, long long pos,
+ long long size) {
+ if (!ExpandAtomsArray())
+ return -1;
+
+ Atom& a = m_atoms[m_atoms_count++];
+ a.Init();
+
+ return a.Parse(pReader, pos, size);
+}
+
+bool Chapters::Edition::ExpandAtomsArray() {
+ if (m_atoms_size > m_atoms_count)
+ return true; // nothing else to do
+
+ const int size = (m_atoms_size == 0) ? 1 : 2 * m_atoms_size;
+
+ Atom* const atoms = new (std::nothrow) Atom[size];
+
+ if (atoms == NULL)
+ return false;
+
+ for (int idx = 0; idx < m_atoms_count; ++idx) {
+ m_atoms[idx].ShallowCopy(atoms[idx]);
+ }
+
+ delete[] m_atoms;
+ m_atoms = atoms;
+
+ m_atoms_size = size;
+ return true;
+}
+
+Chapters::Atom::Atom() {}
+
+Chapters::Atom::~Atom() {}
+
+unsigned long long Chapters::Atom::GetUID() const { return m_uid; }
+
+const char* Chapters::Atom::GetStringUID() const { return m_string_uid; }
+
+long long Chapters::Atom::GetStartTimecode() const { return m_start_timecode; }
+
+long long Chapters::Atom::GetStopTimecode() const { return m_stop_timecode; }
+
+long long Chapters::Atom::GetStartTime(const Chapters* pChapters) const {
+ return GetTime(pChapters, m_start_timecode);
+}
+
+long long Chapters::Atom::GetStopTime(const Chapters* pChapters) const {
+ return GetTime(pChapters, m_stop_timecode);
+}
+
+int Chapters::Atom::GetDisplayCount() const { return m_displays_count; }
+
+const Chapters::Display* Chapters::Atom::GetDisplay(int index) const {
+ if (index < 0)
+ return NULL;
+
+ if (index >= m_displays_count)
+ return NULL;
+
+ return m_displays + index;
+}
+
+void Chapters::Atom::Init() {
+ m_string_uid = NULL;
+ m_uid = 0;
+ m_start_timecode = -1;
+ m_stop_timecode = -1;
+
+ m_displays = NULL;
+ m_displays_size = 0;
+ m_displays_count = 0;
+}
+
+void Chapters::Atom::ShallowCopy(Atom& rhs) const {
+ rhs.m_string_uid = m_string_uid;
+ rhs.m_uid = m_uid;
+ rhs.m_start_timecode = m_start_timecode;
+ rhs.m_stop_timecode = m_stop_timecode;
+
+ rhs.m_displays = m_displays;
+ rhs.m_displays_size = m_displays_size;
+ rhs.m_displays_count = m_displays_count;
+}
+
+void Chapters::Atom::Clear() {
+ delete[] m_string_uid;
+ m_string_uid = NULL;
+
+ while (m_displays_count > 0) {
+ Display& d = m_displays[--m_displays_count];
+ d.Clear();
+ }
+
+ delete[] m_displays;
+ m_displays = NULL;
+
+ m_displays_size = 0;
+}
+
+long Chapters::Atom::Parse(IMkvReader* pReader, long long pos, long long size) {
+ const long long stop = pos + size;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // 0 length payload, skip.
+ continue;
+
+ if (id == libwebm::kMkvChapterDisplay) {
+ status = ParseDisplay(pReader, pos, size);
+
+ if (status < 0) // error
+ return status;
+ } else if (id == libwebm::kMkvChapterStringUID) {
+ status = UnserializeString(pReader, pos, size, m_string_uid);
+
+ if (status < 0) // error
+ return status;
+ } else if (id == libwebm::kMkvChapterUID) {
+ long long val;
+ status = UnserializeInt(pReader, pos, size, val);
+
+ if (status < 0) // error
+ return status;
+
+ m_uid = static_cast<unsigned long long>(val);
+ } else if (id == libwebm::kMkvChapterTimeStart) {
+ const long long val = UnserializeUInt(pReader, pos, size);
+
+ if (val < 0) // error
+ return static_cast<long>(val);
+
+ m_start_timecode = val;
+ } else if (id == libwebm::kMkvChapterTimeEnd) {
+ const long long val = UnserializeUInt(pReader, pos, size);
+
+ if (val < 0) // error
+ return static_cast<long>(val);
+
+ m_stop_timecode = val;
+ }
+
+ pos += size;
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ return 0;
+}
+
+long long Chapters::Atom::GetTime(const Chapters* pChapters,
+ long long timecode) {
+ if (pChapters == NULL)
+ return -1;
+
+ Segment* const pSegment = pChapters->m_pSegment;
+
+ if (pSegment == NULL) // weird
+ return -1;
+
+ const SegmentInfo* const pInfo = pSegment->GetInfo();
+
+ if (pInfo == NULL)
+ return -1;
+
+ const long long timecode_scale = pInfo->GetTimeCodeScale();
+
+ if (timecode_scale < 1) // weird
+ return -1;
+
+ if (timecode < 0)
+ return -1;
+
+ const long long result = timecode_scale * timecode;
+
+ return result;
+}
+
+long Chapters::Atom::ParseDisplay(IMkvReader* pReader, long long pos,
+ long long size) {
+ if (!ExpandDisplaysArray())
+ return -1;
+
+ Display& d = m_displays[m_displays_count++];
+ d.Init();
+
+ return d.Parse(pReader, pos, size);
+}
+
+bool Chapters::Atom::ExpandDisplaysArray() {
+ if (m_displays_size > m_displays_count)
+ return true; // nothing else to do
+
+ const int size = (m_displays_size == 0) ? 1 : 2 * m_displays_size;
+
+ Display* const displays = new (std::nothrow) Display[size];
+
+ if (displays == NULL)
+ return false;
+
+ for (int idx = 0; idx < m_displays_count; ++idx) {
+ m_displays[idx].ShallowCopy(displays[idx]);
+ }
+
+ delete[] m_displays;
+ m_displays = displays;
+
+ m_displays_size = size;
+ return true;
+}
+
+Chapters::Display::Display() {}
+
+Chapters::Display::~Display() {}
+
+const char* Chapters::Display::GetString() const { return m_string; }
+
+const char* Chapters::Display::GetLanguage() const { return m_language; }
+
+const char* Chapters::Display::GetCountry() const { return m_country; }
+
+void Chapters::Display::Init() {
+ m_string = NULL;
+ m_language = NULL;
+ m_country = NULL;
+}
+
+void Chapters::Display::ShallowCopy(Display& rhs) const {
+ rhs.m_string = m_string;
+ rhs.m_language = m_language;
+ rhs.m_country = m_country;
+}
+
+void Chapters::Display::Clear() {
+ delete[] m_string;
+ m_string = NULL;
+
+ delete[] m_language;
+ m_language = NULL;
+
+ delete[] m_country;
+ m_country = NULL;
+}
+
+long Chapters::Display::Parse(IMkvReader* pReader, long long pos,
+ long long size) {
+ const long long stop = pos + size;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // No payload.
+ continue;
+
+ if (id == libwebm::kMkvChapString) {
+ status = UnserializeString(pReader, pos, size, m_string);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvChapLanguage) {
+ status = UnserializeString(pReader, pos, size, m_language);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvChapCountry) {
+ status = UnserializeString(pReader, pos, size, m_country);
+
+ if (status)
+ return status;
+ }
+
+ pos += size;
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ return 0;
+}
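+
+// Illustrative walk of a fully-parsed chapter tree (a sketch using the
+// accessors above, not part of the parser):
+//
+//   const Chapters* const pChapters = pSegment->GetChapters();
+//   for (int e = 0; pChapters && e < pChapters->GetEditionCount(); ++e) {
+//     const Chapters::Edition* const pEd = pChapters->GetEdition(e);
+//     for (int a = 0; a < pEd->GetAtomCount(); ++a) {
+//       const Chapters::Atom* const pAtom = pEd->GetAtom(a);
+//       const long long start_ns = pAtom->GetStartTime(pChapters);
+//       const Chapters::Display* const pD = pAtom->GetDisplay(0);
+//       const char* const title = pD ? pD->GetString() : NULL;
+//       // ... report title / start_ns ...
+//     }
+//   }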
+
+Tags::Tags(Segment* pSegment, long long payload_start, long long payload_size,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(payload_start),
+ m_size(payload_size),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_tags(NULL),
+ m_tags_size(0),
+ m_tags_count(0) {}
+
+Tags::~Tags() {
+ while (m_tags_count > 0) {
+ Tag& t = m_tags[--m_tags_count];
+ t.Clear();
+ }
+ delete[] m_tags;
+}
+
+long Tags::Parse() {
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long pos = m_start; // payload start
+ const long long stop = pos + m_size; // payload stop
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0)
+ return status;
+
+ if (size == 0) // 0 length tag, read another
+ continue;
+
+ if (id == libwebm::kMkvTag) {
+ status = ParseTag(pos, size);
+
+ if (status < 0)
+ return status;
+ }
+
+ pos += size;
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0;
+}
+
+int Tags::GetTagCount() const { return m_tags_count; }
+
+const Tags::Tag* Tags::GetTag(int idx) const {
+ if (idx < 0)
+ return NULL;
+
+ if (idx >= m_tags_count)
+ return NULL;
+
+ return m_tags + idx;
+}
+
+bool Tags::ExpandTagsArray() {
+ if (m_tags_size > m_tags_count)
+ return true; // nothing else to do
+
+ const int size = (m_tags_size == 0) ? 1 : 2 * m_tags_size;
+
+ Tag* const tags = new (std::nothrow) Tag[size];
+
+ if (tags == NULL)
+ return false;
+
+ for (int idx = 0; idx < m_tags_count; ++idx) {
+ m_tags[idx].ShallowCopy(tags[idx]);
+ }
+
+ delete[] m_tags;
+ m_tags = tags;
+
+ m_tags_size = size;
+ return true;
+}
+
+long Tags::ParseTag(long long pos, long long size) {
+ if (!ExpandTagsArray())
+ return -1;
+
+ Tag& t = m_tags[m_tags_count++];
+ t.Init();
+
+ return t.Parse(m_pSegment->m_pReader, pos, size);
+}
+
+Tags::Tag::Tag() {}
+
+Tags::Tag::~Tag() {}
+
+int Tags::Tag::GetSimpleTagCount() const { return m_simple_tags_count; }
+
+const Tags::SimpleTag* Tags::Tag::GetSimpleTag(int index) const {
+ if (index < 0)
+ return NULL;
+
+ if (index >= m_simple_tags_count)
+ return NULL;
+
+ return m_simple_tags + index;
+}
+
+void Tags::Tag::Init() {
+ m_simple_tags = NULL;
+ m_simple_tags_size = 0;
+ m_simple_tags_count = 0;
+}
+
+void Tags::Tag::ShallowCopy(Tag& rhs) const {
+ rhs.m_simple_tags = m_simple_tags;
+ rhs.m_simple_tags_size = m_simple_tags_size;
+ rhs.m_simple_tags_count = m_simple_tags_count;
+}
+
+void Tags::Tag::Clear() {
+  while (m_simple_tags_count > 0) {
+    SimpleTag& st = m_simple_tags[--m_simple_tags_count];
+    st.Clear();
+  }
+
+ delete[] m_simple_tags;
+ m_simple_tags = NULL;
+
+ m_simple_tags_size = 0;
+}
+
+long Tags::Tag::Parse(IMkvReader* pReader, long long pos, long long size) {
+ const long long stop = pos + size;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0)
+ return status;
+
+ if (size == 0) // 0 length tag, read another
+ continue;
+
+ if (id == libwebm::kMkvSimpleTag) {
+ status = ParseSimpleTag(pReader, pos, size);
+
+ if (status < 0)
+ return status;
+ }
+
+ pos += size;
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ return 0;
+}
+
+long Tags::Tag::ParseSimpleTag(IMkvReader* pReader, long long pos,
+ long long size) {
+ if (!ExpandSimpleTagsArray())
+ return -1;
+
+ SimpleTag& st = m_simple_tags[m_simple_tags_count++];
+ st.Init();
+
+ return st.Parse(pReader, pos, size);
+}
+
+bool Tags::Tag::ExpandSimpleTagsArray() {
+ if (m_simple_tags_size > m_simple_tags_count)
+ return true; // nothing else to do
+
+ const int size = (m_simple_tags_size == 0) ? 1 : 2 * m_simple_tags_size;
+
+  SimpleTag* const simple_tags = new (std::nothrow) SimpleTag[size];
+
+  if (simple_tags == NULL)
+    return false;
+
+  for (int idx = 0; idx < m_simple_tags_count; ++idx) {
+    m_simple_tags[idx].ShallowCopy(simple_tags[idx]);
+  }
+
+  delete[] m_simple_tags;
+  m_simple_tags = simple_tags;
+
+ m_simple_tags_size = size;
+ return true;
+}
+
+Tags::SimpleTag::SimpleTag() {}
+
+Tags::SimpleTag::~SimpleTag() {}
+
+const char* Tags::SimpleTag::GetTagName() const { return m_tag_name; }
+
+const char* Tags::SimpleTag::GetTagString() const { return m_tag_string; }
+
+void Tags::SimpleTag::Init() {
+ m_tag_name = NULL;
+ m_tag_string = NULL;
+}
+
+void Tags::SimpleTag::ShallowCopy(SimpleTag& rhs) const {
+ rhs.m_tag_name = m_tag_name;
+ rhs.m_tag_string = m_tag_string;
+}
+
+void Tags::SimpleTag::Clear() {
+ delete[] m_tag_name;
+ m_tag_name = NULL;
+
+ delete[] m_tag_string;
+ m_tag_string = NULL;
+}
+
+long Tags::SimpleTag::Parse(IMkvReader* pReader, long long pos,
+ long long size) {
+ const long long stop = pos + size;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // 0-length element; skip it
+ continue;
+
+ if (id == libwebm::kMkvTagName) {
+ status = UnserializeString(pReader, pos, size, m_tag_name);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvTagString) {
+ status = UnserializeString(pReader, pos, size, m_tag_string);
+
+ if (status)
+ return status;
+ }
+
+ pos += size;
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ return 0;
+}
+
+SegmentInfo::SegmentInfo(Segment* pSegment, long long start, long long size_,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(start),
+ m_size(size_),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_pMuxingAppAsUTF8(NULL),
+ m_pWritingAppAsUTF8(NULL),
+ m_pTitleAsUTF8(NULL) {}
+
+SegmentInfo::~SegmentInfo() {
+ delete[] m_pMuxingAppAsUTF8;
+ m_pMuxingAppAsUTF8 = NULL;
+
+ delete[] m_pWritingAppAsUTF8;
+ m_pWritingAppAsUTF8 = NULL;
+
+ delete[] m_pTitleAsUTF8;
+ m_pTitleAsUTF8 = NULL;
+}
+
+long SegmentInfo::Parse() {
+ assert(m_pMuxingAppAsUTF8 == NULL);
+ assert(m_pWritingAppAsUTF8 == NULL);
+ assert(m_pTitleAsUTF8 == NULL);
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long pos = m_start;
+ const long long stop = m_start + m_size;
+
+ m_timecodeScale = 1000000;
+ m_duration = -1;
+
+ while (pos < stop) {
+ long long id, size;
+
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvTimecodeScale) {
+ m_timecodeScale = UnserializeUInt(pReader, pos, size);
+
+ if (m_timecodeScale <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvDuration) {
+ const long status = UnserializeFloat(pReader, pos, size, m_duration);
+
+ if (status < 0)
+ return status;
+
+ if (m_duration < 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvMuxingApp) {
+ const long status =
+ UnserializeString(pReader, pos, size, m_pMuxingAppAsUTF8);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvWritingApp) {
+ const long status =
+ UnserializeString(pReader, pos, size, m_pWritingAppAsUTF8);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvTitle) {
+ const long status = UnserializeString(pReader, pos, size, m_pTitleAsUTF8);
+
+ if (status)
+ return status;
+ }
+
+ pos += size;
+
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
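+ // Sanity-check that the duration (in timecode ticks) times the timecode
+ // scale (in ns per tick) still fits in a long long, since that product
+ // is exactly what GetDuration() later returns as nanoseconds.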
+ const double rollover_check = m_duration * m_timecodeScale;
+ if (rollover_check > static_cast<double>(LLONG_MAX))
+ return E_FILE_FORMAT_INVALID;
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0;
+}
+
+long long SegmentInfo::GetTimeCodeScale() const { return m_timecodeScale; }
+
+long long SegmentInfo::GetDuration() const {
+ if (m_duration < 0)
+ return -1;
+
+ assert(m_timecodeScale >= 1);
+
+ const double dd = double(m_duration) * double(m_timecodeScale);
+ const long long d = static_cast<long long>(dd);
+
+ return d;
+}
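+
+// Note: GetDuration() returns the segment duration in nanoseconds, i.e.
+// the raw floating-point duration scaled by the timecode scale. With the
+// default scale of 1,000,000 ns per tick, a stored duration of 42000.0
+// ticks yields 42,000,000,000 ns (42 seconds).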
+
+const char* SegmentInfo::GetMuxingAppAsUTF8() const {
+ return m_pMuxingAppAsUTF8;
+}
+
+const char* SegmentInfo::GetWritingAppAsUTF8() const {
+ return m_pWritingAppAsUTF8;
+}
+
+const char* SegmentInfo::GetTitleAsUTF8() const { return m_pTitleAsUTF8; }
+
+///////////////////////////////////////////////////////////////
+// ContentEncoding element
+ContentEncoding::ContentCompression::ContentCompression()
+ : algo(0), settings(NULL), settings_len(0) {}
+
+ContentEncoding::ContentCompression::~ContentCompression() {
+ delete[] settings;
+}
+
+ContentEncoding::ContentEncryption::ContentEncryption()
+ : algo(0),
+ key_id(NULL),
+ key_id_len(0),
+ signature(NULL),
+ signature_len(0),
+ sig_key_id(NULL),
+ sig_key_id_len(0),
+ sig_algo(0),
+ sig_hash_algo(0) {}
+
+ContentEncoding::ContentEncryption::~ContentEncryption() {
+ delete[] key_id;
+ delete[] signature;
+ delete[] sig_key_id;
+}
+
+ContentEncoding::ContentEncoding()
+ : compression_entries_(NULL),
+ compression_entries_end_(NULL),
+ encryption_entries_(NULL),
+ encryption_entries_end_(NULL),
+ encoding_order_(0),
+ encoding_scope_(1),
+ encoding_type_(0) {}
+
+ContentEncoding::~ContentEncoding() {
+ ContentCompression** comp_i = compression_entries_;
+ ContentCompression** const comp_j = compression_entries_end_;
+
+ while (comp_i != comp_j) {
+ ContentCompression* const comp = *comp_i++;
+ delete comp;
+ }
+
+ delete[] compression_entries_;
+
+ ContentEncryption** enc_i = encryption_entries_;
+ ContentEncryption** const enc_j = encryption_entries_end_;
+
+ while (enc_i != enc_j) {
+ ContentEncryption* const enc = *enc_i++;
+ delete enc;
+ }
+
+ delete[] encryption_entries_;
+}
+
+const ContentEncoding::ContentCompression*
+ContentEncoding::GetCompressionByIndex(unsigned long idx) const {
+ const ptrdiff_t count = compression_entries_end_ - compression_entries_;
+ assert(count >= 0);
+
+ if (idx >= static_cast<unsigned long>(count))
+ return NULL;
+
+ return compression_entries_[idx];
+}
+
+unsigned long ContentEncoding::GetCompressionCount() const {
+ const ptrdiff_t count = compression_entries_end_ - compression_entries_;
+ assert(count >= 0);
+
+ return static_cast<unsigned long>(count);
+}
+
+const ContentEncoding::ContentEncryption* ContentEncoding::GetEncryptionByIndex(
+ unsigned long idx) const {
+ const ptrdiff_t count = encryption_entries_end_ - encryption_entries_;
+ assert(count >= 0);
+
+ if (idx >= static_cast<unsigned long>(count))
+ return NULL;
+
+ return encryption_entries_[idx];
+}
+
+unsigned long ContentEncoding::GetEncryptionCount() const {
+ const ptrdiff_t count = encryption_entries_end_ - encryption_entries_;
+ assert(count >= 0);
+
+ return static_cast<unsigned long>(count);
+}
+
+long ContentEncoding::ParseContentEncAESSettingsEntry(
+ long long start, long long size, IMkvReader* pReader,
+ ContentEncAESSettings* aes) {
+ assert(pReader);
+ assert(aes);
+
+ long long pos = start;
+ const long long stop = start + size;
+
+ while (pos < stop) {
+ long long id, size;
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvAESSettingsCipherMode) {
+ aes->cipher_mode = UnserializeUInt(pReader, pos, size);
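+ // Per the WebM encryption spec, 1 (AES-CTR) is the only cipher mode
+ // defined, so anything else is treated as a format error.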
+ if (aes->cipher_mode != 1)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ return 0;
+}
+
+long ContentEncoding::ParseContentEncodingEntry(long long start, long long size,
+ IMkvReader* pReader) {
+ assert(pReader);
+
+ long long pos = start;
+ const long long stop = start + size;
+
+ // Count ContentCompression and ContentEncryption elements.
+ int compression_count = 0;
+ int encryption_count = 0;
+
+ while (pos < stop) {
+ long long id, size;
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvContentCompression)
+ ++compression_count;
+
+ if (id == libwebm::kMkvContentEncryption)
+ ++encryption_count;
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (compression_count <= 0 && encryption_count <= 0)
+ return -1;
+
+ if (compression_count > 0) {
+ compression_entries_ =
+ new (std::nothrow) ContentCompression*[compression_count];
+ if (!compression_entries_)
+ return -1;
+ compression_entries_end_ = compression_entries_;
+ }
+
+ if (encryption_count > 0) {
+ encryption_entries_ =
+ new (std::nothrow) ContentEncryption*[encryption_count];
+ if (!encryption_entries_) {
+ delete[] compression_entries_;
+ compression_entries_ = NULL;
+ compression_entries_end_ = NULL;
+ return -1;
+ }
+ encryption_entries_end_ = encryption_entries_;
+ }
+
+ pos = start;
+ while (pos < stop) {
+ long long id, size;
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvContentEncodingOrder) {
+ encoding_order_ = UnserializeUInt(pReader, pos, size);
+ } else if (id == libwebm::kMkvContentEncodingScope) {
+ encoding_scope_ = UnserializeUInt(pReader, pos, size);
+ if (encoding_scope_ < 1)
+ return -1;
+ } else if (id == libwebm::kMkvContentEncodingType) {
+ encoding_type_ = UnserializeUInt(pReader, pos, size);
+ } else if (id == libwebm::kMkvContentCompression) {
+ ContentCompression* const compression =
+ new (std::nothrow) ContentCompression();
+ if (!compression)
+ return -1;
+
+ status = ParseCompressionEntry(pos, size, pReader, compression);
+ if (status) {
+ delete compression;
+ return status;
+ }
+ *compression_entries_end_++ = compression;
+ } else if (id == libwebm::kMkvContentEncryption) {
+ ContentEncryption* const encryption =
+ new (std::nothrow) ContentEncryption();
+ if (!encryption)
+ return -1;
+
+ status = ParseEncryptionEntry(pos, size, pReader, encryption);
+ if (status) {
+ delete encryption;
+ return status;
+ }
+ *encryption_entries_end_++ = encryption;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ return 0;
+}
+
+long ContentEncoding::ParseCompressionEntry(long long start, long long size,
+ IMkvReader* pReader,
+ ContentCompression* compression) {
+ assert(pReader);
+ assert(compression);
+
+ long long pos = start;
+ const long long stop = start + size;
+
+ bool valid = false;
+
+ while (pos < stop) {
+ long long id, size;
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvContentCompAlgo) {
+ long long algo = UnserializeUInt(pReader, pos, size);
+ if (algo < 0)
+ return E_FILE_FORMAT_INVALID;
+ compression->algo = algo;
+ valid = true;
+ } else if (id == libwebm::kMkvContentCompSettings) {
+ if (size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const size_t buflen = static_cast<size_t>(size);
+ unsigned char* buf = SafeArrayAlloc<unsigned char>(1, buflen);
+ if (buf == NULL)
+ return -1;
+
+ const int read_status =
+ pReader->Read(pos, static_cast<long>(buflen), buf);
+ if (read_status) {
+ delete[] buf;
+ return read_status;
+ }
+
+ compression->settings = buf;
+ compression->settings_len = buflen;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ // ContentCompAlgo is mandatory
+ if (!valid)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0;
+}
+
+long ContentEncoding::ParseEncryptionEntry(long long start, long long size,
+ IMkvReader* pReader,
+ ContentEncryption* encryption) {
+ assert(pReader);
+ assert(encryption);
+
+ long long pos = start;
+ const long long stop = start + size;
+
+ while (pos < stop) {
+ long long id, size;
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvContentEncAlgo) {
+ encryption->algo = UnserializeUInt(pReader, pos, size);
+ if (encryption->algo != 5)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvContentEncKeyID) {
+ delete[] encryption->key_id;
+ encryption->key_id = NULL;
+ encryption->key_id_len = 0;
+
+ if (size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const size_t buflen = static_cast<size_t>(size);
+ unsigned char* buf = SafeArrayAlloc<unsigned char>(1, buflen);
+ if (buf == NULL)
+ return -1;
+
+ const int read_status =
+ pReader->Read(pos, static_cast<long>(buflen), buf);
+ if (read_status) {
+ delete[] buf;
+ return read_status;
+ }
+
+ encryption->key_id = buf;
+ encryption->key_id_len = buflen;
+ } else if (id == libwebm::kMkvContentSignature) {
+ delete[] encryption->signature;
+ encryption->signature = NULL;
+ encryption->signature_len = 0;
+
+ if (size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const size_t buflen = static_cast<size_t>(size);
+ unsigned char* buf = SafeArrayAlloc<unsigned char>(1, buflen);
+ if (buf == NULL)
+ return -1;
+
+ const int read_status =
+ pReader->Read(pos, static_cast<long>(buflen), buf);
+ if (read_status) {
+ delete[] buf;
+ return read_status;
+ }
+
+ encryption->signature = buf;
+ encryption->signature_len = buflen;
+ } else if (id == libwebm::kMkvContentSigKeyID) {
+ delete[] encryption->sig_key_id;
+ encryption->sig_key_id = NULL;
+ encryption->sig_key_id_len = 0;
+
+ if (size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const size_t buflen = static_cast<size_t>(size);
+ unsigned char* buf = SafeArrayAlloc<unsigned char>(1, buflen);
+ if (buf == NULL)
+ return -1;
+
+ const int read_status =
+ pReader->Read(pos, static_cast<long>(buflen), buf);
+ if (read_status) {
+ delete[] buf;
+ return read_status;
+ }
+
+ encryption->sig_key_id = buf;
+ encryption->sig_key_id_len = buflen;
+ } else if (id == libwebm::kMkvContentSigAlgo) {
+ encryption->sig_algo = UnserializeUInt(pReader, pos, size);
+ } else if (id == libwebm::kMkvContentSigHashAlgo) {
+ encryption->sig_hash_algo = UnserializeUInt(pReader, pos, size);
+ } else if (id == libwebm::kMkvContentEncAESSettings) {
+ const long status = ParseContentEncAESSettingsEntry(
+ pos, size, pReader, &encryption->aes_settings);
+ if (status)
+ return status;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ return 0;
+}
+
+Track::Track(Segment* pSegment, long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ content_encoding_entries_(NULL),
+ content_encoding_entries_end_(NULL) {}
+
+Track::~Track() {
+ Info& info = const_cast<Info&>(m_info);
+ info.Clear();
+
+ ContentEncoding** i = content_encoding_entries_;
+ ContentEncoding** const j = content_encoding_entries_end_;
+
+ while (i != j) {
+ ContentEncoding* const encoding = *i++;
+ delete encoding;
+ }
+
+ delete[] content_encoding_entries_;
+}
+
+long Track::Create(Segment* pSegment, const Info& info, long long element_start,
+ long long element_size, Track*& pResult) {
+ if (pResult)
+ return -1;
+
+ Track* const pTrack =
+ new (std::nothrow) Track(pSegment, element_start, element_size);
+
+ if (pTrack == NULL)
+ return -1; // generic error
+
+ const int status = info.Copy(pTrack->m_info);
+
+ if (status) { // error
+ delete pTrack;
+ return status;
+ }
+
+ pResult = pTrack;
+ return 0; // success
+}
+
+Track::Info::Info()
+ : uid(0),
+ defaultDuration(0),
+ codecDelay(0),
+ seekPreRoll(0),
+ nameAsUTF8(NULL),
+ language(NULL),
+ codecId(NULL),
+ codecNameAsUTF8(NULL),
+ codecPrivate(NULL),
+ codecPrivateSize(0),
+ lacing(false) {}
+
+Track::Info::~Info() { Clear(); }
+
+void Track::Info::Clear() {
+ delete[] nameAsUTF8;
+ nameAsUTF8 = NULL;
+
+ delete[] language;
+ language = NULL;
+
+ delete[] codecId;
+ codecId = NULL;
+
+ delete[] codecPrivate;
+ codecPrivate = NULL;
+ codecPrivateSize = 0;
+
+ delete[] codecNameAsUTF8;
+ codecNameAsUTF8 = NULL;
+}
+
+int Track::Info::CopyStr(char* Info::*str, Info& dst_) const {
+ if (str == static_cast<char * Info::*>(NULL))
+ return -1;
+
+ char*& dst = dst_.*str;
+
+ if (dst) // should be NULL already
+ return -1;
+
+ const char* const src = this->*str;
+
+ if (src == NULL)
+ return 0;
+
+ const size_t len = strlen(src);
+
+ dst = SafeArrayAlloc<char>(1, len + 1);
+
+ if (dst == NULL)
+ return -1;
+
+ strcpy(dst, src);
+
+ return 0;
+}
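+
+// CopyStr takes a pointer-to-member (e.g. &Info::language) so that a
+// single helper can deep-copy any of the Info string fields; Copy()
+// below simply invokes it once per field.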
+
+int Track::Info::Copy(Info& dst) const {
+ if (&dst == this)
+ return 0;
+
+ dst.type = type;
+ dst.number = number;
+ dst.defaultDuration = defaultDuration;
+ dst.codecDelay = codecDelay;
+ dst.seekPreRoll = seekPreRoll;
+ dst.uid = uid;
+ dst.lacing = lacing;
+ dst.settings = settings;
+
+ // We now copy the string member variables from src to dst.
+ // This involves memory allocation so in principle the operation
+ // can fail (indeed, that's why we have Info::Copy), so we must
+ // report this to the caller. An error return from this function
+ // therefore implies that the copy was only partially successful.
+
+ if (int status = CopyStr(&Info::nameAsUTF8, dst))
+ return status;
+
+ if (int status = CopyStr(&Info::language, dst))
+ return status;
+
+ if (int status = CopyStr(&Info::codecId, dst))
+ return status;
+
+ if (int status = CopyStr(&Info::codecNameAsUTF8, dst))
+ return status;
+
+ if (codecPrivateSize > 0) {
+ if (codecPrivate == NULL)
+ return -1;
+
+ if (dst.codecPrivate)
+ return -1;
+
+ if (dst.codecPrivateSize != 0)
+ return -1;
+
+ dst.codecPrivate = SafeArrayAlloc<unsigned char>(1, codecPrivateSize);
+
+ if (dst.codecPrivate == NULL)
+ return -1;
+
+ memcpy(dst.codecPrivate, codecPrivate, codecPrivateSize);
+ dst.codecPrivateSize = codecPrivateSize;
+ }
+
+ return 0;
+}
+
+const BlockEntry* Track::GetEOS() const { return &m_eos; }
+
+long Track::GetType() const { return m_info.type; }
+
+long Track::GetNumber() const { return m_info.number; }
+
+unsigned long long Track::GetUid() const { return m_info.uid; }
+
+const char* Track::GetNameAsUTF8() const { return m_info.nameAsUTF8; }
+
+const char* Track::GetLanguage() const { return m_info.language; }
+
+const char* Track::GetCodecNameAsUTF8() const { return m_info.codecNameAsUTF8; }
+
+const char* Track::GetCodecId() const { return m_info.codecId; }
+
+const unsigned char* Track::GetCodecPrivate(size_t& size) const {
+ size = m_info.codecPrivateSize;
+ return m_info.codecPrivate;
+}
+
+bool Track::GetLacing() const { return m_info.lacing; }
+
+unsigned long long Track::GetDefaultDuration() const {
+ return m_info.defaultDuration;
+}
+
+unsigned long long Track::GetCodecDelay() const { return m_info.codecDelay; }
+
+unsigned long long Track::GetSeekPreRoll() const { return m_info.seekPreRoll; }
+
+long Track::GetFirst(const BlockEntry*& pBlockEntry) const {
+ const Cluster* pCluster = m_pSegment->GetFirst();
+
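+ // Scan forward from the first cluster, but give up after examining 100
+ // clusters without finding a block for this track (see the NOTE at the
+ // end of this function).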
+ for (int i = 0;;) {
+ if (pCluster == NULL) {
+ pBlockEntry = GetEOS();
+ return 1;
+ }
+
+ if (pCluster->EOS()) {
+ if (m_pSegment->DoneParsing()) {
+ pBlockEntry = GetEOS();
+ return 1;
+ }
+
+ pBlockEntry = 0;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long status = pCluster->GetFirst(pBlockEntry);
+
+ if (status < 0) // error
+ return status;
+
+ if (pBlockEntry == 0) { // empty cluster
+ pCluster = m_pSegment->GetNext(pCluster);
+ continue;
+ }
+
+ for (;;) {
+ const Block* const pBlock = pBlockEntry->GetBlock();
+ assert(pBlock);
+
+ const long long tn = pBlock->GetTrackNumber();
+
+ if ((tn == m_info.number) && VetEntry(pBlockEntry))
+ return 0;
+
+ const BlockEntry* pNextEntry;
+
+ status = pCluster->GetNext(pBlockEntry, pNextEntry);
+
+ if (status < 0) // error
+ return status;
+
+ if (pNextEntry == 0)
+ break;
+
+ pBlockEntry = pNextEntry;
+ }
+
+ ++i;
+
+ if (i >= 100)
+ break;
+
+ pCluster = m_pSegment->GetNext(pCluster);
+ }
+
+ // NOTE: if we get here, it means that we didn't find a block with
+ // a matching track number. We interpret that as an error (which
+ // might be too conservative).
+
+ pBlockEntry = GetEOS(); // so we can return a non-NULL value
+ return 1;
+}
+
+long Track::GetNext(const BlockEntry* pCurrEntry,
+ const BlockEntry*& pNextEntry) const {
+ assert(pCurrEntry);
+ assert(!pCurrEntry->EOS()); // the EOS entry is not a valid starting point
+
+ const Block* const pCurrBlock = pCurrEntry->GetBlock();
+ assert(pCurrBlock && pCurrBlock->GetTrackNumber() == m_info.number);
+ if (!pCurrBlock || pCurrBlock->GetTrackNumber() != m_info.number)
+ return -1;
+
+ const Cluster* pCluster = pCurrEntry->GetCluster();
+ assert(pCluster);
+ assert(!pCluster->EOS());
+
+ long status = pCluster->GetNext(pCurrEntry, pNextEntry);
+
+ if (status < 0) // error
+ return status;
+
+ for (int i = 0;;) {
+ while (pNextEntry) {
+ const Block* const pNextBlock = pNextEntry->GetBlock();
+ assert(pNextBlock);
+
+ if (pNextBlock->GetTrackNumber() == m_info.number)
+ return 0;
+
+ pCurrEntry = pNextEntry;
+
+ status = pCluster->GetNext(pCurrEntry, pNextEntry);
+
+ if (status < 0) // error
+ return status;
+ }
+
+ pCluster = m_pSegment->GetNext(pCluster);
+
+ if (pCluster == NULL) {
+ pNextEntry = GetEOS();
+ return 1;
+ }
+
+ if (pCluster->EOS()) {
+ if (m_pSegment->DoneParsing()) {
+ pNextEntry = GetEOS();
+ return 1;
+ }
+
+ // TODO: there is a potential O(n^2) problem here: we tell the
+ // caller to (pre)load another cluster, which he does, but then he
+ // calls GetNext again, which repeats the same search. This is
+ // a pathological case, since the only way it can happen is if
+ // there exists a long sequence of clusters none of which contain a
+ // block from this track. One way around this problem is for the
+ // caller to be smarter when he loads another cluster: don't call
+ // us back until you have a cluster that contains a block from this
+ // track. (Of course, that's not cheap either, since our caller
+ // would have to scan each cluster as it's loaded, so that
+ // would just push back the problem.)
+
+ pNextEntry = NULL;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ status = pCluster->GetFirst(pNextEntry);
+
+ if (status < 0) // error
+ return status;
+
+ if (pNextEntry == NULL) // empty cluster
+ continue;
+
+ ++i;
+
+ if (i >= 100)
+ break;
+ }
+
+ // NOTE: if we get here, it means that we didn't find a block with
+ // a matching track number after lots of searching, so we give
+ // up trying.
+
+ pNextEntry = GetEOS(); // so we can return a non-NULL value
+ return 1;
+}
+
+bool Track::VetEntry(const BlockEntry* pBlockEntry) const {
+ assert(pBlockEntry);
+ const Block* const pBlock = pBlockEntry->GetBlock();
+ assert(pBlock);
+ assert(pBlock->GetTrackNumber() == m_info.number);
+ if (!pBlock || pBlock->GetTrackNumber() != m_info.number)
+ return false;
+
+ // This function is used during a seek to determine whether the
+ // frame is a valid seek target. This default function simply
+ // returns true, which means all frames are valid seek targets.
+ // It gets overridden by the VideoTrack class, because only video
+ // keyframes can be used as seek targets.
+
+ return true;
+}
+
+long Track::Seek(long long time_ns, const BlockEntry*& pResult) const {
+ const long status = GetFirst(pResult);
+
+ if (status < 0) // buffer underflow, etc
+ return status;
+
+ assert(pResult);
+
+ if (pResult->EOS())
+ return 0;
+
+ const Cluster* pCluster = pResult->GetCluster();
+ assert(pCluster);
+ assert(pCluster->GetIndex() >= 0);
+
+ if (time_ns <= pResult->GetBlock()->GetTime(pCluster))
+ return 0;
+
+ Cluster** const clusters = m_pSegment->m_clusters;
+ assert(clusters);
+
+ const long count = m_pSegment->GetCount(); // loaded only, not preloaded
+ assert(count > 0);
+
+ Cluster** const i = clusters + pCluster->GetIndex();
+ assert(i);
+ assert(*i == pCluster);
+ assert(pCluster->GetTime() <= time_ns);
+
+ Cluster** const j = clusters + count;
+
+ Cluster** lo = i;
+ Cluster** hi = j;
+
+ while (lo < hi) {
+ // INVARIANT:
+ //[i, lo) <= time_ns
+ //[lo, hi) ?
+ //[hi, j) > time_ns
+
+ Cluster** const mid = lo + (hi - lo) / 2;
+ assert(mid < hi);
+
+ pCluster = *mid;
+ assert(pCluster);
+ assert(pCluster->GetIndex() >= 0);
+ assert(pCluster->GetIndex() == long(mid - m_pSegment->m_clusters));
+
+ const long long t = pCluster->GetTime();
+
+ if (t <= time_ns)
+ lo = mid + 1;
+ else
+ hi = mid;
+
+ assert(lo <= hi);
+ }
+
+ assert(lo == hi);
+ assert(lo > i);
+ assert(lo <= j);
+
+ while (lo > i) {
+ pCluster = *--lo;
+ assert(pCluster);
+ assert(pCluster->GetTime() <= time_ns);
+
+ pResult = pCluster->GetEntry(this);
+
+ if ((pResult != 0) && !pResult->EOS())
+ return 0;
+
+ // landed on empty cluster (no entries)
+ }
+
+ pResult = GetEOS(); // all candidate clusters were empty
+ return 0;
+}
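+
+// A minimal seek sketch (illustrative only; `track` and `time_ns` are
+// assumed to come from the caller, and clusters must already be loaded):
+//
+//   const mkvparser::BlockEntry* entry = NULL;
+//   if (track->Seek(time_ns, entry) == 0 && !entry->EOS()) {
+//     const mkvparser::Block* const block = entry->GetBlock();
+//     // block is the first candidate at or before time_ns
+//   }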
+
+const ContentEncoding* Track::GetContentEncodingByIndex(
+ unsigned long idx) const {
+ const ptrdiff_t count =
+ content_encoding_entries_end_ - content_encoding_entries_;
+ assert(count >= 0);
+
+ if (idx >= static_cast<unsigned long>(count))
+ return NULL;
+
+ return content_encoding_entries_[idx];
+}
+
+unsigned long Track::GetContentEncodingCount() const {
+ const ptrdiff_t count =
+ content_encoding_entries_end_ - content_encoding_entries_;
+ assert(count >= 0);
+
+ return static_cast<unsigned long>(count);
+}
+
+long Track::ParseContentEncodingsEntry(long long start, long long size) {
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+ assert(pReader);
+
+ long long pos = start;
+ const long long stop = start + size;
+
+ // Count ContentEncoding elements.
+ int count = 0;
+ while (pos < stop) {
+ long long id, size;
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
+ return status;
+
+ // pos now designates start of element
+ if (id == libwebm::kMkvContentEncoding)
+ ++count;
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (count <= 0)
+ return -1;
+
+ content_encoding_entries_ = new (std::nothrow) ContentEncoding*[count];
+ if (!content_encoding_entries_)
+ return -1;
+
+ content_encoding_entries_end_ = content_encoding_entries_;
+
+ pos = start;
+ while (pos < stop) {
+ long long id, size;
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+ if (status < 0) // error
+ return status;
+
+ // pos now designates start of element
+ if (id == libwebm::kMkvContentEncoding) {
+ ContentEncoding* const content_encoding =
+ new (std::nothrow) ContentEncoding();
+ if (!content_encoding)
+ return -1;
+
+ status = content_encoding->ParseContentEncodingEntry(pos, size, pReader);
+ if (status) {
+ delete content_encoding;
+ return status;
+ }
+
+ *content_encoding_entries_end_++ = content_encoding;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0;
+}
+
+Track::EOSBlock::EOSBlock() : BlockEntry(NULL, LONG_MIN) {}
+
+BlockEntry::Kind Track::EOSBlock::GetKind() const { return kBlockEOS; }
+
+const Block* Track::EOSBlock::GetBlock() const { return NULL; }
+
+bool PrimaryChromaticity::Parse(IMkvReader* reader, long long read_pos,
+ long long value_size, bool is_x,
+ PrimaryChromaticity** chromaticity) {
+ if (!reader)
+ return false;
+
+ if (!*chromaticity)
+ *chromaticity = new (std::nothrow) PrimaryChromaticity();
+
+ if (!*chromaticity)
+ return false;
+
+ PrimaryChromaticity* pc = *chromaticity;
+ float* value = is_x ? &pc->x : &pc->y;
+
+ double parser_value = 0;
+ const long long parse_status =
+ UnserializeFloat(reader, read_pos, value_size, parser_value);
+
+ // Valid range is [0, 1]. Make sure the double is representable as a float
+ // before casting.
+ if (parse_status < 0 || parser_value < 0.0 || parser_value > 1.0 ||
+ (parser_value > 0.0 && parser_value < FLT_MIN))
+ return false;
+
+ *value = static_cast<float>(parser_value);
+
+ return true;
+}
+
+bool MasteringMetadata::Parse(IMkvReader* reader, long long mm_start,
+ long long mm_size, MasteringMetadata** mm) {
+ if (!reader || *mm)
+ return false;
+
+ std::unique_ptr<MasteringMetadata> mm_ptr(new MasteringMetadata());
+ if (!mm_ptr.get())
+ return false;
+
+ const long long mm_end = mm_start + mm_size;
+ long long read_pos = mm_start;
+
+ while (read_pos < mm_end) {
+ long long child_id = 0;
+ long long child_size = 0;
+
+ const long long status =
+ ParseElementHeader(reader, read_pos, mm_end, child_id, child_size);
+ if (status < 0)
+ return false;
+
+ if (child_id == libwebm::kMkvLuminanceMax) {
+ double value = 0;
+ const long long value_parse_status =
+ UnserializeFloat(reader, read_pos, child_size, value);
+ if (value < -FLT_MAX || value > FLT_MAX ||
+ (value > 0.0 && value < FLT_MIN)) {
+ return false;
+ }
+ mm_ptr->luminance_max = static_cast<float>(value);
+ if (value_parse_status < 0 || mm_ptr->luminance_max < 0.0 ||
+ mm_ptr->luminance_max > 9999.99) {
+ return false;
+ }
+ } else if (child_id == libwebm::kMkvLuminanceMin) {
+ double value = 0;
+ const long long value_parse_status =
+ UnserializeFloat(reader, read_pos, child_size, value);
+ if (value < -FLT_MAX || value > FLT_MAX ||
+ (value > 0.0 && value < FLT_MIN)) {
+ return false;
+ }
+ mm_ptr->luminance_min = static_cast<float>(value);
+ if (value_parse_status < 0 || mm_ptr->luminance_min < 0.0 ||
+ mm_ptr->luminance_min > 999.9999) {
+ return false;
+ }
+ } else {
+ bool is_x = false;
+ PrimaryChromaticity** chromaticity;
+ switch (child_id) {
+ case libwebm::kMkvPrimaryRChromaticityX:
+ case libwebm::kMkvPrimaryRChromaticityY:
+ is_x = child_id == libwebm::kMkvPrimaryRChromaticityX;
+ chromaticity = &mm_ptr->r;
+ break;
+ case libwebm::kMkvPrimaryGChromaticityX:
+ case libwebm::kMkvPrimaryGChromaticityY:
+ is_x = child_id == libwebm::kMkvPrimaryGChromaticityX;
+ chromaticity = &mm_ptr->g;
+ break;
+ case libwebm::kMkvPrimaryBChromaticityX:
+ case libwebm::kMkvPrimaryBChromaticityY:
+ is_x = child_id == libwebm::kMkvPrimaryBChromaticityX;
+ chromaticity = &mm_ptr->b;
+ break;
+ case libwebm::kMkvWhitePointChromaticityX:
+ case libwebm::kMkvWhitePointChromaticityY:
+ is_x = child_id == libwebm::kMkvWhitePointChromaticityX;
+ chromaticity = &mm_ptr->white_point;
+ break;
+ default:
+ return false;
+ }
+ const bool value_parse_status = PrimaryChromaticity::Parse(
+ reader, read_pos, child_size, is_x, chromaticity);
+ if (!value_parse_status)
+ return false;
+ }
+
+ read_pos += child_size;
+ if (read_pos > mm_end)
+ return false;
+ }
+
+ *mm = mm_ptr.release();
+ return true;
+}
+
+bool Colour::Parse(IMkvReader* reader, long long colour_start,
+ long long colour_size, Colour** colour) {
+ if (!reader || *colour)
+ return false;
+
+ std::unique_ptr<Colour> colour_ptr(new Colour());
+ if (!colour_ptr.get())
+ return false;
+
+ const long long colour_end = colour_start + colour_size;
+ long long read_pos = colour_start;
+
+ while (read_pos < colour_end) {
+ long long child_id = 0;
+ long long child_size = 0;
+
+ const long status =
+ ParseElementHeader(reader, read_pos, colour_end, child_id, child_size);
+ if (status < 0)
+ return false;
+
+ if (child_id == libwebm::kMkvMatrixCoefficients) {
+ colour_ptr->matrix_coefficients =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->matrix_coefficients < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvBitsPerChannel) {
+ colour_ptr->bits_per_channel =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->bits_per_channel < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvChromaSubsamplingHorz) {
+ colour_ptr->chroma_subsampling_horz =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->chroma_subsampling_horz < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvChromaSubsamplingVert) {
+ colour_ptr->chroma_subsampling_vert =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->chroma_subsampling_vert < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvCbSubsamplingHorz) {
+ colour_ptr->cb_subsampling_horz =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->cb_subsampling_horz < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvCbSubsamplingVert) {
+ colour_ptr->cb_subsampling_vert =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->cb_subsampling_vert < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvChromaSitingHorz) {
+ colour_ptr->chroma_siting_horz =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->chroma_siting_horz < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvChromaSitingVert) {
+ colour_ptr->chroma_siting_vert =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->chroma_siting_vert < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvRange) {
+ colour_ptr->range = UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->range < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvTransferCharacteristics) {
+ colour_ptr->transfer_characteristics =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->transfer_characteristics < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvPrimaries) {
+ colour_ptr->primaries = UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->primaries < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvMaxCLL) {
+ colour_ptr->max_cll = UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->max_cll < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvMaxFALL) {
+ colour_ptr->max_fall = UnserializeUInt(reader, read_pos, child_size);
+ if (colour_ptr->max_fall < 0)
+ return false;
+ } else if (child_id == libwebm::kMkvMasteringMetadata) {
+ if (!MasteringMetadata::Parse(reader, read_pos, child_size,
+ &colour_ptr->mastering_metadata))
+ return false;
+ } else {
+ return false;
+ }
+
+ read_pos += child_size;
+ if (read_pos > colour_end)
+ return false;
+ }
+ *colour = colour_ptr.release();
+ return true;
+}
+
+bool Projection::Parse(IMkvReader* reader, long long start, long long size,
+ Projection** projection) {
+ if (!reader || *projection)
+ return false;
+
+ std::unique_ptr<Projection> projection_ptr(new Projection());
+ if (!projection_ptr.get())
+ return false;
+
+ const long long end = start + size;
+ long long read_pos = start;
+
+ while (read_pos < end) {
+ long long child_id = 0;
+ long long child_size = 0;
+
+ const long long status =
+ ParseElementHeader(reader, read_pos, end, child_id, child_size);
+ if (status < 0)
+ return false;
+
+ if (child_id == libwebm::kMkvProjectionType) {
+ const long long projection_type =
+ UnserializeUInt(reader, read_pos, child_size);
+ if (projection_type < 0)
+ return false;
+
+ projection_ptr->type = static_cast<ProjectionType>(projection_type);
+ } else if (child_id == libwebm::kMkvProjectionPrivate) {
+ unsigned char* data = SafeArrayAlloc<unsigned char>(1, child_size);
+
+ if (data == NULL)
+ return false;
+
+ const int status =
+ reader->Read(read_pos, static_cast<long>(child_size), data);
+
+ if (status) {
+ delete[] data;
+ return false;
+ }
+
+ projection_ptr->private_data = data;
+ projection_ptr->private_data_length = static_cast<size_t>(child_size);
+ } else {
+ double value = 0;
+ const long long value_parse_status =
+ UnserializeFloat(reader, read_pos, child_size, value);
+ // Make sure value is representable as a float before casting.
+ if (value_parse_status < 0 || value < -FLT_MAX || value > FLT_MAX ||
+ (value > 0.0 && value < FLT_MIN)) {
+ return false;
+ }
+
+ switch (child_id) {
+ case libwebm::kMkvProjectionPoseYaw:
+ projection_ptr->pose_yaw = static_cast<float>(value);
+ break;
+ case libwebm::kMkvProjectionPosePitch:
+ projection_ptr->pose_pitch = static_cast<float>(value);
+ break;
+ case libwebm::kMkvProjectionPoseRoll:
+ projection_ptr->pose_roll = static_cast<float>(value);
+ break;
+ default:
+ return false;
+ }
+ }
+
+ read_pos += child_size;
+ if (read_pos > end)
+ return false;
+ }
+
+ *projection = projection_ptr.release();
+ return true;
+}
+
+VideoTrack::VideoTrack(Segment* pSegment, long long element_start,
+ long long element_size)
+ : Track(pSegment, element_start, element_size),
+ m_colour(NULL),
+ m_projection(NULL) {}
+
+VideoTrack::~VideoTrack() {
+ delete m_colour;
+ delete m_projection;
+}
+
+long VideoTrack::Parse(Segment* pSegment, const Info& info,
+ long long element_start, long long element_size,
+ VideoTrack*& pResult) {
+ if (pResult)
+ return -1;
+
+ if (info.type != Track::kVideo)
+ return -1;
+
+ long long width = 0;
+ long long height = 0;
+ long long display_width = 0;
+ long long display_height = 0;
+ long long display_unit = 0;
+ long long stereo_mode = 0;
+
+ double rate = 0.0;
+
+ IMkvReader* const pReader = pSegment->m_pReader;
+
+ const Settings& s = info.settings;
+ assert(s.start >= 0);
+ assert(s.size >= 0);
+
+ long long pos = s.start;
+ assert(pos >= 0);
+
+ const long long stop = pos + s.size;
+
+ Colour* colour = NULL;
+ Projection* projection = NULL;
+
+ while (pos < stop) {
+ long long id, size;
+
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvPixelWidth) {
+ width = UnserializeUInt(pReader, pos, size);
+
+ if (width <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvPixelHeight) {
+ height = UnserializeUInt(pReader, pos, size);
+
+ if (height <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvDisplayWidth) {
+ display_width = UnserializeUInt(pReader, pos, size);
+
+ if (display_width <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvDisplayHeight) {
+ display_height = UnserializeUInt(pReader, pos, size);
+
+ if (display_height <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvDisplayUnit) {
+ display_unit = UnserializeUInt(pReader, pos, size);
+
+ if (display_unit < 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvStereoMode) {
+ stereo_mode = UnserializeUInt(pReader, pos, size);
+
+ if (stereo_mode < 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvFrameRate) {
+ const long status = UnserializeFloat(pReader, pos, size, rate);
+
+ if (status < 0)
+ return status;
+
+ if (rate <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvColour) {
+ if (!Colour::Parse(pReader, pos, size, &colour))
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvProjection) {
+ if (!Projection::Parse(pReader, pos, size, &projection))
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ VideoTrack* const pTrack =
+ new (std::nothrow) VideoTrack(pSegment, element_start, element_size);
+
+ if (pTrack == NULL)
+ return -1; // generic error
+
+ const int status = info.Copy(pTrack->m_info);
+
+ if (status) { // error
+ delete pTrack;
+ return status;
+ }
+
+ pTrack->m_width = width;
+ pTrack->m_height = height;
+ pTrack->m_display_width = display_width;
+ pTrack->m_display_height = display_height;
+ pTrack->m_display_unit = display_unit;
+ pTrack->m_stereo_mode = stereo_mode;
+ pTrack->m_rate = rate;
+ pTrack->m_colour = colour;
+ pTrack->m_projection = projection;
+
+ pResult = pTrack;
+ return 0; // success
+}
+
+bool VideoTrack::VetEntry(const BlockEntry* pBlockEntry) const {
+ return Track::VetEntry(pBlockEntry) && pBlockEntry->GetBlock()->IsKey();
+}
+
+long VideoTrack::Seek(long long time_ns, const BlockEntry*& pResult) const {
+ const long status = GetFirst(pResult);
+
+ if (status < 0) // buffer underflow, etc
+ return status;
+
+ assert(pResult);
+
+ if (pResult->EOS())
+ return 0;
+
+ const Cluster* pCluster = pResult->GetCluster();
+ assert(pCluster);
+ assert(pCluster->GetIndex() >= 0);
+
+ if (time_ns <= pResult->GetBlock()->GetTime(pCluster))
+ return 0;
+
+ Cluster** const clusters = m_pSegment->m_clusters;
+ assert(clusters);
+
+ const long count = m_pSegment->GetCount(); // loaded only, not pre-loaded
+ assert(count > 0);
+
+ Cluster** const i = clusters + pCluster->GetIndex();
+ assert(i);
+ assert(*i == pCluster);
+ assert(pCluster->GetTime() <= time_ns);
+
+ Cluster** const j = clusters + count;
+
+ Cluster** lo = i;
+ Cluster** hi = j;
+
+ while (lo < hi) {
+ // INVARIANT:
+ //[i, lo) <= time_ns
+ //[lo, hi) ?
+ //[hi, j) > time_ns
+
+ Cluster** const mid = lo + (hi - lo) / 2;
+ assert(mid < hi);
+
+ pCluster = *mid;
+ assert(pCluster);
+ assert(pCluster->GetIndex() >= 0);
+ assert(pCluster->GetIndex() == long(mid - m_pSegment->m_clusters));
+
+ const long long t = pCluster->GetTime();
+
+ if (t <= time_ns)
+ lo = mid + 1;
+ else
+ hi = mid;
+
+ assert(lo <= hi);
+ }
+
+ assert(lo == hi);
+ assert(lo > i);
+ assert(lo <= j);
+
+ pCluster = *--lo;
+ assert(pCluster);
+ assert(pCluster->GetTime() <= time_ns);
+
+ pResult = pCluster->GetEntry(this, time_ns);
+
+ if ((pResult != 0) && !pResult->EOS()) // found a keyframe
+ return 0;
+
+ while (lo != i) {
+ pCluster = *--lo;
+ assert(pCluster);
+ assert(pCluster->GetTime() <= time_ns);
+
+ pResult = pCluster->GetEntry(this, time_ns);
+
+ if ((pResult != 0) && !pResult->EOS())
+ return 0;
+ }
+
+ // weird: we're on the first cluster, but no keyframe found
+ // should never happen but we must return something anyway
+
+ pResult = GetEOS();
+ return 0;
+}
+
+Colour* VideoTrack::GetColour() const { return m_colour; }
+
+Projection* VideoTrack::GetProjection() const { return m_projection; }
+
+long long VideoTrack::GetWidth() const { return m_width; }
+
+long long VideoTrack::GetHeight() const { return m_height; }
+
+long long VideoTrack::GetDisplayWidth() const {
+ return m_display_width > 0 ? m_display_width : GetWidth();
+}
+
+long long VideoTrack::GetDisplayHeight() const {
+ return m_display_height > 0 ? m_display_height : GetHeight();
+}
+
+long long VideoTrack::GetDisplayUnit() const { return m_display_unit; }
+
+long long VideoTrack::GetStereoMode() const { return m_stereo_mode; }
+
+double VideoTrack::GetFrameRate() const { return m_rate; }
+
+AudioTrack::AudioTrack(Segment* pSegment, long long element_start,
+ long long element_size)
+ : Track(pSegment, element_start, element_size) {}
+
+long AudioTrack::Parse(Segment* pSegment, const Info& info,
+ long long element_start, long long element_size,
+ AudioTrack*& pResult) {
+ if (pResult)
+ return -1;
+
+ if (info.type != Track::kAudio)
+ return -1;
+
+ IMkvReader* const pReader = pSegment->m_pReader;
+
+ const Settings& s = info.settings;
+ assert(s.start >= 0);
+ assert(s.size >= 0);
+
+ long long pos = s.start;
+ assert(pos >= 0);
+
+ const long long stop = pos + s.size;
+
+ double rate = 8000.0; // MKV default
+ long long channels = 1;
+ long long bit_depth = 0;
+
+ while (pos < stop) {
+ long long id, size;
+
+ long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (id == libwebm::kMkvSamplingFrequency) {
+ status = UnserializeFloat(pReader, pos, size, rate);
+
+ if (status < 0)
+ return status;
+
+ if (rate <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvChannels) {
+ channels = UnserializeUInt(pReader, pos, size);
+
+ if (channels <= 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvBitDepth) {
+ bit_depth = UnserializeUInt(pReader, pos, size);
+
+ if (bit_depth <= 0)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ AudioTrack* const pTrack =
+ new (std::nothrow) AudioTrack(pSegment, element_start, element_size);
+
+ if (pTrack == NULL)
+ return -1; // generic error
+
+ const int status = info.Copy(pTrack->m_info);
+
+ if (status) {
+ delete pTrack;
+ return status;
+ }
+
+ pTrack->m_rate = rate;
+ pTrack->m_channels = channels;
+ pTrack->m_bitDepth = bit_depth;
+
+ pResult = pTrack;
+ return 0; // success
+}
+
+double AudioTrack::GetSamplingRate() const { return m_rate; }
+
+long long AudioTrack::GetChannels() const { return m_channels; }
+
+long long AudioTrack::GetBitDepth() const { return m_bitDepth; }
+
+Tracks::Tracks(Segment* pSegment, long long start, long long size_,
+ long long element_start, long long element_size)
+ : m_pSegment(pSegment),
+ m_start(start),
+ m_size(size_),
+ m_element_start(element_start),
+ m_element_size(element_size),
+ m_trackEntries(NULL),
+ m_trackEntriesEnd(NULL) {}
+
+long Tracks::Parse() {
+ assert(m_trackEntries == NULL);
+ assert(m_trackEntriesEnd == NULL);
+
+ const long long stop = m_start + m_size;
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ int count = 0;
+ long long pos = m_start;
+
+ while (pos < stop) {
+ long long id, size;
+
+ const long status = ParseElementHeader(pReader, pos, stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size == 0) // 0-length element; skip it
+ continue;
+
+ if (id == libwebm::kMkvTrackEntry)
+ ++count;
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if (count <= 0)
+ return 0; // success
+
+ m_trackEntries = new (std::nothrow) Track*[count];
+
+ if (m_trackEntries == NULL)
+ return -1;
+
+ m_trackEntriesEnd = m_trackEntries;
+
+ pos = m_start;
+
+ while (pos < stop) {
+ const long long element_start = pos;
+
+ long long id, payload_size;
+
+ const long status =
+ ParseElementHeader(pReader, pos, stop, id, payload_size);
+
+ if (status < 0) // error
+ return status;
+
+ if (payload_size == 0) // 0-length element; skip it
+ continue;
+
+ const long long payload_stop = pos + payload_size;
+ assert(payload_stop <= stop); // checked in ParseElementHeader
+
+ const long long element_size = payload_stop - element_start;
+
+ if (id == libwebm::kMkvTrackEntry) {
+ Track*& pTrack = *m_trackEntriesEnd;
+ pTrack = NULL;
+
+ const long status = ParseTrackEntry(pos, payload_size, element_start,
+ element_size, pTrack);
+ if (status)
+ return status;
+
+ if (pTrack)
+ ++m_trackEntriesEnd;
+ }
+
+ pos = payload_stop;
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ return 0; // success
+}
+
+unsigned long Tracks::GetTracksCount() const {
+ const ptrdiff_t result = m_trackEntriesEnd - m_trackEntries;
+ assert(result >= 0);
+
+ return static_cast<unsigned long>(result);
+}
+
+long Tracks::ParseTrackEntry(long long track_start, long long track_size,
+ long long element_start, long long element_size,
+ Track*& pResult) const {
+ if (pResult)
+ return -1;
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long pos = track_start;
+ const long long track_stop = track_start + track_size;
+
+ Track::Info info;
+
+ info.type = 0;
+ info.number = 0;
+ info.uid = 0;
+ info.defaultDuration = 0;
+
+ Track::Settings v;
+ v.start = -1;
+ v.size = -1;
+
+ Track::Settings a;
+ a.start = -1;
+ a.size = -1;
+
+ Track::Settings e; // ContentEncodings settings
+ e.start = -1;
+ e.size = -1;
+
+ long long lacing = 1; // default is true
+
+ while (pos < track_stop) {
+ long long id, size;
+
+ const long status = ParseElementHeader(pReader, pos, track_stop, id, size);
+
+ if (status < 0) // error
+ return status;
+
+ if (size < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long start = pos;
+
+ if (id == libwebm::kMkvVideo) {
+ v.start = start;
+ v.size = size;
+ } else if (id == libwebm::kMkvAudio) {
+ a.start = start;
+ a.size = size;
+ } else if (id == libwebm::kMkvContentEncodings) {
+ e.start = start;
+ e.size = size;
+ } else if (id == libwebm::kMkvTrackUID) {
+ if (size > 8)
+ return E_FILE_FORMAT_INVALID;
+
+ info.uid = 0;
+
+ long long pos_ = start;
+ const long long pos_end = start + size;
+
+ while (pos_ != pos_end) {
+ unsigned char b;
+
+ const int status = pReader->Read(pos_, 1, &b);
+
+ if (status)
+ return status;
+
+ info.uid <<= 8;
+ info.uid |= b;
+
+ ++pos_;
+ }
+ } else if (id == libwebm::kMkvTrackNumber) {
+ const long long num = UnserializeUInt(pReader, pos, size);
+
+ if ((num <= 0) || (num > 127))
+ return E_FILE_FORMAT_INVALID;
+
+ info.number = static_cast<long>(num);
+ } else if (id == libwebm::kMkvTrackType) {
+ const long long type = UnserializeUInt(pReader, pos, size);
+
+ if ((type <= 0) || (type > 254))
+ return E_FILE_FORMAT_INVALID;
+
+ info.type = static_cast<long>(type);
+ } else if (id == libwebm::kMkvName) {
+ const long status =
+ UnserializeString(pReader, pos, size, info.nameAsUTF8);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvLanguage) {
+ const long status = UnserializeString(pReader, pos, size, info.language);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvDefaultDuration) {
+ const long long duration = UnserializeUInt(pReader, pos, size);
+
+ if (duration < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ info.defaultDuration = static_cast<unsigned long long>(duration);
+ } else if (id == libwebm::kMkvCodecID) {
+ const long status = UnserializeString(pReader, pos, size, info.codecId);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvFlagLacing) {
+ lacing = UnserializeUInt(pReader, pos, size);
+
+ if ((lacing < 0) || (lacing > 1))
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvCodecPrivate) {
+ delete[] info.codecPrivate;
+ info.codecPrivate = NULL;
+ info.codecPrivateSize = 0;
+
+ const size_t buflen = static_cast<size_t>(size);
+
+ if (buflen) {
+ unsigned char* buf = SafeArrayAlloc<unsigned char>(1, buflen);
+
+ if (buf == NULL)
+ return -1;
+
+ const int status = pReader->Read(pos, static_cast<long>(buflen), buf);
+
+ if (status) {
+ delete[] buf;
+ return status;
+ }
+
+ info.codecPrivate = buf;
+ info.codecPrivateSize = buflen;
+ }
+ } else if (id == libwebm::kMkvCodecName) {
+ const long status =
+ UnserializeString(pReader, pos, size, info.codecNameAsUTF8);
+
+ if (status)
+ return status;
+ } else if (id == libwebm::kMkvCodecDelay) {
+ info.codecDelay = UnserializeUInt(pReader, pos, size);
+ } else if (id == libwebm::kMkvSeekPreRoll) {
+ info.seekPreRoll = UnserializeUInt(pReader, pos, size);
+ }
+
+ pos += size; // consume payload
+ if (pos > track_stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != track_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if (info.number <= 0) // not specified
+ return E_FILE_FORMAT_INVALID;
+
+ if (GetTrackByNumber(info.number))
+ return E_FILE_FORMAT_INVALID;
+
+ if (info.type <= 0) // not specified
+ return E_FILE_FORMAT_INVALID;
+
+ info.lacing = (lacing > 0);
+
+ if (info.type == Track::kVideo) {
+ if (v.start < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (a.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ info.settings = v;
+
+ VideoTrack* pTrack = NULL;
+
+ const long status = VideoTrack::Parse(m_pSegment, info, element_start,
+ element_size, pTrack);
+
+ if (status)
+ return status;
+
+ pResult = pTrack;
+ assert(pResult);
+
+ if (e.start >= 0)
+ pResult->ParseContentEncodingsEntry(e.start, e.size);
+ } else if (info.type == Track::kAudio) {
+ if (a.start < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (v.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ info.settings = a;
+
+ AudioTrack* pTrack = NULL;
+
+ const long status = AudioTrack::Parse(m_pSegment, info, element_start,
+ element_size, pTrack);
+
+ if (status)
+ return status;
+
+ pResult = pTrack;
+ assert(pResult);
+
+ if (e.start >= 0)
+ pResult->ParseContentEncodingsEntry(e.start, e.size);
+ } else {
+ // neither video nor audio - probably metadata or subtitles
+
+ if (a.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (v.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (info.type == Track::kMetadata && e.start >= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ info.settings.start = -1;
+ info.settings.size = 0;
+
+ Track* pTrack = NULL;
+
+ const long status =
+ Track::Create(m_pSegment, info, element_start, element_size, pTrack);
+
+ if (status)
+ return status;
+
+ pResult = pTrack;
+ assert(pResult);
+ }
+
+ return 0; // success
+}
+
+Tracks::~Tracks() {
+ Track** i = m_trackEntries;
+ Track** const j = m_trackEntriesEnd;
+
+ while (i != j) {
+ Track* const pTrack = *i++;
+ delete pTrack;
+ }
+
+ delete[] m_trackEntries;
+}
+
+const Track* Tracks::GetTrackByNumber(long tn) const {
+ if (tn < 0)
+ return NULL;
+
+ Track** i = m_trackEntries;
+ Track** const j = m_trackEntriesEnd;
+
+ while (i != j) {
+ Track* const pTrack = *i++;
+
+ if (pTrack == NULL)
+ continue;
+
+ if (tn == pTrack->GetNumber())
+ return pTrack;
+ }
+
+ return NULL; // not found
+}
+
+const Track* Tracks::GetTrackByIndex(unsigned long idx) const {
+ const ptrdiff_t count = m_trackEntriesEnd - m_trackEntries;
+
+ if (idx >= static_cast<unsigned long>(count))
+ return NULL;
+
+ return m_trackEntries[idx];
+}
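+
+// A minimal enumeration sketch (illustrative only; `pTracks` is assumed
+// to be the Tracks object of a fully parsed Segment):
+//
+//   for (unsigned long i = 0; i < pTracks->GetTracksCount(); ++i) {
+//     const mkvparser::Track* const pTrack = pTracks->GetTrackByIndex(i);
+//     if (pTrack == NULL)
+//       continue;
+//     if (pTrack->GetType() == mkvparser::Track::kVideo) {
+//       // static_cast to VideoTrack to reach GetWidth()/GetHeight()
+//     }
+//   }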
+
+long Cluster::Load(long long& pos, long& len) const {
+ if (m_pSegment == NULL)
+ return E_PARSE_FAILED;
+
+ if (m_timecode >= 0) // at least partially loaded
+ return 0;
+
+ if (m_pos != m_element_start || m_element_size >= 0)
+ return E_PARSE_FAILED;
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+ long long total, avail;
+ const int status = pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ if (total >= 0 && (avail > total || m_pos > total))
+ return E_FILE_FORMAT_INVALID;
+
+ pos = m_pos;
+
+ long long cluster_size = -1;
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error or underflow
+ return static_cast<long>(result);
+
+ if (result > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id_ = ReadID(pReader, pos, len);
+
+ if (id_ < 0) // error
+ return static_cast<long>(id_);
+
+ if (id_ != libwebm::kMkvCluster)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume id
+
+ // read cluster size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size); // propagate the ReadUInt error code
+
+ if (size == 0)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume length of size of element
+
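+ // In EBML, a size field of len bytes whose value bits are all ones
+ // means "unknown size"; a live-streamed cluster may legally use it, in
+ // which case cluster_size stays -1 and the cluster's end is found by
+ // scanning for the next top-level element.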
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size != unknown_size)
+ cluster_size = size;
+
+ // pos points to start of payload
+ long long timecode = -1;
+ long long new_pos = -1;
+ bool bBlock = false;
+
+ long long cluster_stop = (cluster_size < 0) ? -1 : pos + cluster_size;
+
+ for (;;) {
+ if ((cluster_stop >= 0) && (pos >= cluster_stop))
+ break;
+
+ // Parse ID
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadID(pReader, pos, len);
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+ if (id == 0)
+ return E_FILE_FORMAT_INVALID;
+
+ // This is the distinguished set of IDs we use to determine
+ // that we have exhausted the sub-elements inside the cluster
+ // whose ID we parsed earlier.
+
+ if (id == libwebm::kMkvCluster)
+ break;
+
+ if (id == libwebm::kMkvCues)
+ break;
+
+ pos += len; // consume ID field
+
+ // Parse Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume size field
+
+ if ((cluster_stop >= 0) && (pos > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // pos now points to start of payload
+
+ if (size == 0)
+ continue;
+
+ if ((cluster_stop >= 0) && ((pos + size) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if (id == libwebm::kMkvTimecode) {
+ len = static_cast<long>(size);
+
+ if ((pos + size) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ timecode = UnserializeUInt(pReader, pos, size);
+
+ if (timecode < 0) // error (or underflow)
+ return static_cast<long>(timecode);
+
+ new_pos = pos + size;
+
+ if (bBlock)
+ break;
+ } else if (id == libwebm::kMkvBlockGroup) {
+ bBlock = true;
+ break;
+ } else if (id == libwebm::kMkvSimpleBlock) {
+ bBlock = true;
+ break;
+ }
+
+ pos += size; // consume payload
+ if (cluster_stop >= 0 && pos > cluster_stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (cluster_stop >= 0 && pos > cluster_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if (timecode < 0) // no timecode found
+ return E_FILE_FORMAT_INVALID;
+
+ if (!bBlock)
+ return E_FILE_FORMAT_INVALID;
+
+ m_pos = new_pos; // designates position just beyond timecode payload
+ m_timecode = timecode; // m_timecode >= 0 means we're partially loaded
+
+ if (cluster_size >= 0)
+ m_element_size = cluster_stop - m_element_start;
+
+ return 0;
+}
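+
+// Callers drive Load()/Parse() with a retry loop: on E_BUFFER_NOT_FULL
+// the out-params say where more data is needed (`pos`) and roughly how
+// much (`len`). A minimal sketch (illustrative only; WaitForMoreData is
+// a hypothetical caller-supplied routine):
+//
+//   long long pos;
+//   long len;
+//   long status;
+//   while ((status = pCluster->Parse(pos, len)) ==
+//          mkvparser::E_BUFFER_NOT_FULL)
+//     WaitForMoreData(pos, len);  // e.g. block until more bytes arrive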
+
+long Cluster::Parse(long long& pos, long& len) const {
+ long status = Load(pos, len);
+
+ if (status < 0)
+ return status;
+
+ if (m_pos < m_element_start || m_timecode < 0)
+ return E_PARSE_FAILED;
+
+ const long long cluster_stop =
+ (m_element_size < 0) ? -1 : m_element_start + m_element_size;
+
+ if ((cluster_stop >= 0) && (m_pos >= cluster_stop))
+ return 1; // nothing else to do
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long total, avail;
+
+ status = pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ if (total >= 0 && avail > total)
+ return E_FILE_FORMAT_INVALID;
+
+ pos = m_pos;
+
+ for (;;) {
+ if ((cluster_stop >= 0) && (pos >= cluster_stop))
+ break;
+
+ if ((total >= 0) && (pos >= total)) {
+ if (m_element_size < 0)
+ m_element_size = pos - m_element_start;
+
+ break;
+ }
+
+ // Parse ID
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadID(pReader, pos, len);
+
+ if (id < 0)
+ return E_FILE_FORMAT_INVALID;
+
+    // This is the distinguished set of IDs we use to determine
+    // that we have exhausted the sub-elements inside the cluster
+    // whose ID we parsed earlier.
+
+ if ((id == libwebm::kMkvCluster) || (id == libwebm::kMkvCues)) {
+ if (m_element_size < 0)
+ m_element_size = pos - m_element_start;
+
+ break;
+ }
+
+ pos += len; // consume ID field
+
+ // Parse Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0)
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume size field
+
+ if ((cluster_stop >= 0) && (pos > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // pos now points to start of payload
+
+ if (size == 0)
+ continue;
+
+ // const long long block_start = pos;
+ const long long block_stop = pos + size;
+
+ if (cluster_stop >= 0) {
+ if (block_stop > cluster_stop) {
+ if (id == libwebm::kMkvBlockGroup || id == libwebm::kMkvSimpleBlock) {
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ pos = cluster_stop;
+ break;
+ }
+ } else if ((total >= 0) && (block_stop > total)) {
+ m_element_size = total - m_element_start;
+ pos = total;
+ break;
+ } else if (block_stop > avail) {
+ len = static_cast<long>(size);
+ return E_BUFFER_NOT_FULL;
+ }
+
+ Cluster* const this_ = const_cast<Cluster*>(this);
+
+ if (id == libwebm::kMkvBlockGroup)
+ return this_->ParseBlockGroup(size, pos, len);
+
+ if (id == libwebm::kMkvSimpleBlock)
+ return this_->ParseSimpleBlock(size, pos, len);
+
+ pos += size; // consume payload
+ if (cluster_stop >= 0 && pos > cluster_stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (m_element_size < 1)
+ return E_FILE_FORMAT_INVALID;
+
+ m_pos = pos;
+ if (cluster_stop >= 0 && m_pos > cluster_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if (m_entries_count > 0) {
+ const long idx = m_entries_count - 1;
+
+ const BlockEntry* const pLast = m_entries[idx];
+ if (pLast == NULL)
+ return E_PARSE_FAILED;
+
+ const Block* const pBlock = pLast->GetBlock();
+ if (pBlock == NULL)
+ return E_PARSE_FAILED;
+
+ const long long start = pBlock->m_start;
+
+ if ((total >= 0) && (start > total))
+      return E_PARSE_FAILED;  // defend against truncated stream
+
+ const long long size = pBlock->m_size;
+
+ const long long stop = start + size;
+ if (cluster_stop >= 0 && stop > cluster_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && (stop > total))
+      return E_PARSE_FAILED;  // defend against truncated stream
+ }
+
+ return 1; // no more entries
+}
+
+long Cluster::ParseSimpleBlock(long long block_size, long long& pos,
+ long& len) {
+ const long long block_start = pos;
+ const long long block_stop = pos + block_size;
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long total, avail;
+
+ long status = pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ // parse track number
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((pos + len) > block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
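+    // The cue's TrackPosition optionally carries a 1-based index of the
+    // target block within this cluster; try that entry directly before
+    // falling back to the linear scan below.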
+ return E_BUFFER_NOT_FULL;
+
+ const long long track = ReadUInt(pReader, pos, len);
+
+ if (track < 0) // error
+ return static_cast<long>(track);
+
+ if (track == 0)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume track number
+
+ if ((pos + 2) > block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + 2) > avail) {
+ len = 2;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ pos += 2; // consume timecode
+
+ if ((pos + 1) > block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ unsigned char flags;
+
+ status = pReader->Read(pos, 1, &flags);
+
+ if (status < 0) { // error or underflow
+ len = 1;
+ return status;
+ }
+
+ ++pos; // consume flags byte
+ assert(pos <= avail);
+
+ if (pos >= block_stop)
+ return E_FILE_FORMAT_INVALID;
+
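+  // Bits 1 and 2 of the flags byte select the lacing mode: 0 = none,
+  // 1 = Xiph, 2 = fixed-size, 3 = EBML (cf. the Lacing enum in
+  // mkvparser.h).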
+ const int lacing = int(flags & 0x06) >> 1;
+
+ if ((lacing != 0) && (block_stop > avail)) {
+ len = static_cast<long>(block_stop - pos);
+ return E_BUFFER_NOT_FULL;
+ }
+
+ status = CreateBlock(libwebm::kMkvSimpleBlock, block_start, block_size,
+ 0); // DiscardPadding
+
+ if (status != 0)
+ return status;
+
+ m_pos = block_stop;
+
+ return 0; // success
+}
+
+long Cluster::ParseBlockGroup(long long payload_size, long long& pos,
+ long& len) {
+ const long long payload_start = pos;
+ const long long payload_stop = pos + payload_size;
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long total, avail;
+
+ long status = pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ if ((total >= 0) && (payload_stop > total))
+ return E_FILE_FORMAT_INVALID;
+
+ if (payload_stop > avail) {
+ len = static_cast<long>(payload_size);
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long discard_padding = 0;
+
+ while (pos < payload_stop) {
+ // parse sub-block element ID
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((pos + len) > payload_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadID(pReader, pos, len);
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+ if (id == 0) // not a valid ID
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume ID field
+
+ // Parse Size
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((pos + len) > payload_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ pos += len; // consume size field
+
+ // pos now points to start of sub-block group payload
+
+ if (pos > payload_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if (size == 0) // weird
+ continue;
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID;
+
+ if (id == libwebm::kMkvDiscardPadding) {
+ status = UnserializeInt(pReader, pos, size, discard_padding);
+
+ if (status < 0) // error
+ return status;
+ }
+
+ if (id != libwebm::kMkvBlock) {
+ pos += size; // consume sub-part of block group
+
+ if (pos > payload_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ continue;
+ }
+
+ const long long block_stop = pos + size;
+
+ if (block_stop > payload_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ // parse track number
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((pos + len) > block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long track = ReadUInt(pReader, pos, len);
+
+ if (track < 0) // error
+ return static_cast<long>(track);
+
+ if (track == 0)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume track number
+
+ if ((pos + 2) > block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + 2) > avail) {
+ len = 2;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ pos += 2; // consume timecode
+
+ if ((pos + 1) > block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ unsigned char flags;
+
+ status = pReader->Read(pos, 1, &flags);
+
+ if (status < 0) { // error or underflow
+ len = 1;
+ return status;
+ }
+
+ ++pos; // consume flags byte
+ assert(pos <= avail);
+
+ if (pos >= block_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ const int lacing = int(flags & 0x06) >> 1;
+
+ if ((lacing != 0) && (block_stop > avail)) {
+ len = static_cast<long>(block_stop - pos);
+ return E_BUFFER_NOT_FULL;
+ }
+
+ pos = block_stop; // consume block-part of block group
+ if (pos > payload_stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ if (pos != payload_stop)
+ return E_FILE_FORMAT_INVALID;
+
+ status = CreateBlock(libwebm::kMkvBlockGroup, payload_start, payload_size,
+ discard_padding);
+ if (status != 0)
+ return status;
+
+ m_pos = payload_stop;
+
+ return 0; // success
+}
+
+long Cluster::GetEntry(long index, const mkvparser::BlockEntry*& pEntry) const {
+ assert(m_pos >= m_element_start);
+
+ pEntry = NULL;
+
+ if (index < 0)
+ return -1; // generic error
+
+ if (m_entries_count < 0)
+ return E_BUFFER_NOT_FULL;
+
+ assert(m_entries);
+ assert(m_entries_size > 0);
+ assert(m_entries_count <= m_entries_size);
+
+ if (index < m_entries_count) {
+ pEntry = m_entries[index];
+ assert(pEntry);
+
+ return 1; // found entry
+ }
+
+ if (m_element_size < 0) // we don't know cluster end yet
+ return E_BUFFER_NOT_FULL; // underflow
+
+ const long long element_stop = m_element_start + m_element_size;
+
+ if (m_pos >= element_stop)
+ return 0; // nothing left to parse
+
+ return E_BUFFER_NOT_FULL; // underflow, since more remains to be parsed
+}
+
+Cluster* Cluster::Create(Segment* pSegment, long idx, long long off) {
+ if (!pSegment || off < 0)
+ return NULL;
+
+ const long long element_start = pSegment->m_start + off;
+
+ Cluster* const pCluster =
+ new (std::nothrow) Cluster(pSegment, idx, element_start);
+
+ return pCluster;
+}
+
+Cluster::Cluster()
+ : m_pSegment(NULL),
+ m_element_start(0),
+ m_index(0),
+ m_pos(0),
+ m_element_size(0),
+ m_timecode(0),
+ m_entries(NULL),
+ m_entries_size(0),
+ m_entries_count(0) // means "no entries"
+{}
+
+Cluster::Cluster(Segment* pSegment, long idx, long long element_start
+ /* long long element_size */)
+ : m_pSegment(pSegment),
+ m_element_start(element_start),
+ m_index(idx),
+ m_pos(element_start),
+ m_element_size(-1 /* element_size */),
+ m_timecode(-1),
+ m_entries(NULL),
+ m_entries_size(0),
+ m_entries_count(-1) // means "has not been parsed yet"
+{}
+
+Cluster::~Cluster() {
+ if (m_entries_count <= 0) {
+ delete[] m_entries;
+ return;
+ }
+
+ BlockEntry** i = m_entries;
+ BlockEntry** const j = m_entries + m_entries_count;
+
+ while (i != j) {
+ BlockEntry* p = *i++;
+ assert(p);
+
+ delete p;
+ }
+
+ delete[] m_entries;
+}
+
+bool Cluster::EOS() const { return (m_pSegment == NULL); }
+
+long Cluster::GetIndex() const { return m_index; }
+
+long long Cluster::GetPosition() const {
+ const long long pos = m_element_start - m_pSegment->m_start;
+ assert(pos >= 0);
+
+ return pos;
+}
+
+long long Cluster::GetElementSize() const { return m_element_size; }
+
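+// Probes the cluster whose element begins at offset |off| (relative to
+// the start of the segment payload) and returns 1 if it contains at
+// least one block (SimpleBlock or BlockGroup), 0 if it has no entries,
+// or a negative status on error or insufficient data.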
+long Cluster::HasBlockEntries(
+ const Segment* pSegment,
+ long long off, // relative to start of segment payload
+ long long& pos, long& len) {
+ assert(pSegment);
+ assert(off >= 0); // relative to segment
+
+ IMkvReader* const pReader = pSegment->m_pReader;
+
+ long long total, avail;
+
+ long status = pReader->Length(&total, &avail);
+
+ if (status < 0) // error
+ return status;
+
+ assert((total < 0) || (avail <= total));
+
+ pos = pSegment->m_start + off; // absolute
+
+ if ((total >= 0) && (pos >= total))
+ return 0; // we don't even have a complete cluster
+
+ const long long segment_stop =
+ (pSegment->m_size < 0) ? -1 : pSegment->m_start + pSegment->m_size;
+
+ long long cluster_stop = -1; // interpreted later to mean "unknown size"
+
+ {
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // need more data
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && ((pos + len) > total))
+ return 0;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadID(pReader, pos, len);
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+ if (id != libwebm::kMkvCluster)
+ return E_PARSE_FAILED;
+
+ pos += len; // consume Cluster ID field
+
+ // read size field
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // weird
+ return E_BUFFER_NOT_FULL;
+
+ if ((segment_stop >= 0) && ((pos + len) > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && ((pos + len) > total))
+ return 0;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ if (size == 0)
+ return 0; // cluster does not have entries
+
+ pos += len; // consume size field
+
+ // pos now points to start of payload
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size != unknown_size) {
+ cluster_stop = pos + size;
+ assert(cluster_stop >= 0);
+
+ if ((segment_stop >= 0) && (cluster_stop > segment_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((total >= 0) && (cluster_stop > total))
+        // return E_FILE_FORMAT_INVALID;  // too conservative
+ return 0; // cluster does not have any entries
+ }
+ }
+
+ for (;;) {
+ if ((cluster_stop >= 0) && (pos >= cluster_stop))
+ return 0; // no entries detected
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ long long result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // need more data
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long id = ReadID(pReader, pos, len);
+
+ if (id < 0) // error
+ return static_cast<long>(id);
+
+    // This is the distinguished set of IDs we use to determine
+    // that we have exhausted the sub-elements inside the cluster
+    // whose ID we parsed earlier.
+
+ if (id == libwebm::kMkvCluster)
+ return 0; // no entries found
+
+ if (id == libwebm::kMkvCues)
+ return 0; // no entries found
+
+ pos += len; // consume id field
+
+ if ((cluster_stop >= 0) && (pos >= cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ // read size field
+
+ if ((pos + 1) > avail) {
+ len = 1;
+ return E_BUFFER_NOT_FULL;
+ }
+
+ result = GetUIntLength(pReader, pos, len);
+
+ if (result < 0) // error
+ return static_cast<long>(result);
+
+ if (result > 0) // underflow
+ return E_BUFFER_NOT_FULL;
+
+ if ((cluster_stop >= 0) && ((pos + len) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > avail)
+ return E_BUFFER_NOT_FULL;
+
+ const long long size = ReadUInt(pReader, pos, len);
+
+ if (size < 0) // error
+ return static_cast<long>(size);
+
+ pos += len; // consume size field
+
+ // pos now points to start of payload
+
+ if ((cluster_stop >= 0) && (pos > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if (size == 0) // weird
+ continue;
+
+ const long long unknown_size = (1LL << (7 * len)) - 1;
+
+ if (size == unknown_size)
+ return E_FILE_FORMAT_INVALID; // not supported inside cluster
+
+ if ((cluster_stop >= 0) && ((pos + size) > cluster_stop))
+ return E_FILE_FORMAT_INVALID;
+
+ if (id == libwebm::kMkvBlockGroup)
+ return 1; // have at least one entry
+
+ if (id == libwebm::kMkvSimpleBlock)
+ return 1; // have at least one entry
+
+ pos += size; // consume payload
+ if (cluster_stop >= 0 && pos > cluster_stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+}
+
+long long Cluster::GetTimeCode() const {
+ long long pos;
+ long len;
+
+ const long status = Load(pos, len);
+
+ if (status < 0) // error
+ return status;
+
+ return m_timecode;
+}
+
+long long Cluster::GetTime() const {
+ const long long tc = GetTimeCode();
+
+ if (tc < 0)
+ return tc;
+
+ const SegmentInfo* const pInfo = m_pSegment->GetInfo();
+ assert(pInfo);
+
+ const long long scale = pInfo->GetTimeCodeScale();
+ assert(scale >= 1);
+
+ const long long t = m_timecode * scale;
+
+ return t;
+}
+
+long long Cluster::GetFirstTime() const {
+ const BlockEntry* pEntry;
+
+ const long status = GetFirst(pEntry);
+
+ if (status < 0) // error
+ return status;
+
+ if (pEntry == NULL) // empty cluster
+ return GetTime();
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ return pBlock->GetTime(this);
+}
+
+long long Cluster::GetLastTime() const {
+ const BlockEntry* pEntry;
+
+ const long status = GetLast(pEntry);
+
+ if (status < 0) // error
+ return status;
+
+ if (pEntry == NULL) // empty cluster
+ return GetTime();
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ return pBlock->GetTime(this);
+}
+
+long Cluster::CreateBlock(long long id,
+ long long pos, // absolute pos of payload
+ long long size, long long discard_padding) {
+ if (id != libwebm::kMkvBlockGroup && id != libwebm::kMkvSimpleBlock)
+ return E_PARSE_FAILED;
+
+ if (m_entries_count < 0) { // haven't parsed anything yet
+ assert(m_entries == NULL);
+ assert(m_entries_size == 0);
+
+ m_entries_size = 1024;
+ m_entries = new (std::nothrow) BlockEntry*[m_entries_size];
+ if (m_entries == NULL)
+ return -1;
+
+ m_entries_count = 0;
+ } else {
+ assert(m_entries);
+ assert(m_entries_size > 0);
+ assert(m_entries_count <= m_entries_size);
+
+ if (m_entries_count >= m_entries_size) {
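+      // Grow the entry array geometrically (doubling) so that repeated
+      // appends cost amortized O(1) copies per entry.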
+ const long entries_size = 2 * m_entries_size;
+
+ BlockEntry** const entries = new (std::nothrow) BlockEntry*[entries_size];
+ if (entries == NULL)
+ return -1;
+
+ BlockEntry** src = m_entries;
+ BlockEntry** const src_end = src + m_entries_count;
+
+ BlockEntry** dst = entries;
+
+ while (src != src_end)
+ *dst++ = *src++;
+
+ delete[] m_entries;
+
+ m_entries = entries;
+ m_entries_size = entries_size;
+ }
+ }
+
+ if (id == libwebm::kMkvBlockGroup)
+ return CreateBlockGroup(pos, size, discard_padding);
+ else
+ return CreateSimpleBlock(pos, size);
+}
+
+long Cluster::CreateBlockGroup(long long start_offset, long long size,
+ long long discard_padding) {
+ assert(m_entries);
+ assert(m_entries_size > 0);
+ assert(m_entries_count >= 0);
+ assert(m_entries_count < m_entries_size);
+
+ IMkvReader* const pReader = m_pSegment->m_pReader;
+
+ long long pos = start_offset;
+ const long long stop = start_offset + size;
+
+  // For WebM files, there is a bias towards previous reference times
+  // (in order to support alt-ref frames, which refer back to the previous
+  // keyframe). Normally a 0 value is not possible, but here we tentatively
+  // allow 0 as the value of a reference frame, with the interpretation
+  // that this is a "previous" reference time.
+
+ long long prev = 1; // nonce
+ long long next = 0; // nonce
+ long long duration = -1; // really, this is unsigned
+
+ long long bpos = -1;
+ long long bsize = -1;
+
+ while (pos < stop) {
+ long len;
+ const long long id = ReadID(pReader, pos, len);
+ if (id < 0 || (pos + len) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume ID
+
+ const long long size = ReadUInt(pReader, pos, len);
+ assert(size >= 0); // TODO
+ assert((pos + len) <= stop);
+
+ pos += len; // consume size
+
+ if (id == libwebm::kMkvBlock) {
+ if (bpos < 0) { // Block ID
+ bpos = pos;
+ bsize = size;
+ }
+ } else if (id == libwebm::kMkvBlockDuration) {
+ if (size > 8)
+ return E_FILE_FORMAT_INVALID;
+
+ duration = UnserializeUInt(pReader, pos, size);
+
+ if (duration < 0)
+ return E_FILE_FORMAT_INVALID;
+ } else if (id == libwebm::kMkvReferenceBlock) {
+ if (size > 8 || size <= 0)
+ return E_FILE_FORMAT_INVALID;
+ const long size_ = static_cast<long>(size);
+
+ long long time;
+
+ long status = UnserializeInt(pReader, pos, size_, time);
+ assert(status == 0);
+ if (status != 0)
+ return -1;
+
+ if (time <= 0) // see note above
+ prev = time;
+ else
+ next = time;
+ }
+
+ pos += size; // consume payload
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+ if (bpos < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ assert(bsize >= 0);
+
+ const long idx = m_entries_count;
+
+ BlockEntry** const ppEntry = m_entries + idx;
+ BlockEntry*& pEntry = *ppEntry;
+
+ pEntry = new (std::nothrow)
+ BlockGroup(this, idx, bpos, bsize, prev, next, duration, discard_padding);
+
+ if (pEntry == NULL)
+ return -1; // generic error
+
+ BlockGroup* const p = static_cast<BlockGroup*>(pEntry);
+
+ const long status = p->Parse();
+
+ if (status == 0) { // success
+ ++m_entries_count;
+ return 0;
+ }
+
+ delete pEntry;
+ pEntry = 0;
+
+ return status;
+}
+
+long Cluster::CreateSimpleBlock(long long st, long long sz) {
+ assert(m_entries);
+ assert(m_entries_size > 0);
+ assert(m_entries_count >= 0);
+ assert(m_entries_count < m_entries_size);
+
+ const long idx = m_entries_count;
+
+ BlockEntry** const ppEntry = m_entries + idx;
+ BlockEntry*& pEntry = *ppEntry;
+
+ pEntry = new (std::nothrow) SimpleBlock(this, idx, st, sz);
+
+ if (pEntry == NULL)
+ return -1; // generic error
+
+ SimpleBlock* const p = static_cast<SimpleBlock*>(pEntry);
+
+ const long status = p->Parse();
+
+ if (status == 0) {
+ ++m_entries_count;
+ return 0;
+ }
+
+ delete pEntry;
+ pEntry = 0;
+
+ return status;
+}
+
+long Cluster::GetFirst(const BlockEntry*& pFirst) const {
+ if (m_entries_count <= 0) {
+ long long pos;
+ long len;
+
+ const long status = Parse(pos, len);
+
+ if (status < 0) { // error
+ pFirst = NULL;
+ return status;
+ }
+
+ if (m_entries_count <= 0) { // empty cluster
+ pFirst = NULL;
+ return 0;
+ }
+ }
+
+ assert(m_entries);
+
+ pFirst = m_entries[0];
+ assert(pFirst);
+
+ return 0; // success
+}
+
+long Cluster::GetLast(const BlockEntry*& pLast) const {
+ for (;;) {
+ long long pos;
+ long len;
+
+ const long status = Parse(pos, len);
+
+ if (status < 0) { // error
+ pLast = NULL;
+ return status;
+ }
+
+ if (status > 0) // no new block
+ break;
+ }
+
+ if (m_entries_count <= 0) {
+ pLast = NULL;
+ return 0;
+ }
+
+ assert(m_entries);
+
+ const long idx = m_entries_count - 1;
+
+ pLast = m_entries[idx];
+ assert(pLast);
+
+ return 0;
+}
+
+long Cluster::GetNext(const BlockEntry* pCurr, const BlockEntry*& pNext) const {
+ assert(pCurr);
+ assert(m_entries);
+ assert(m_entries_count > 0);
+
+ size_t idx = pCurr->GetIndex();
+ assert(idx < size_t(m_entries_count));
+ assert(m_entries[idx] == pCurr);
+
+ ++idx;
+
+ if (idx >= size_t(m_entries_count)) {
+ long long pos;
+ long len;
+
+ const long status = Parse(pos, len);
+
+ if (status < 0) { // error
+ pNext = NULL;
+ return status;
+ }
+
+ if (status > 0) {
+ pNext = NULL;
+ return 0;
+ }
+
+ assert(m_entries);
+ assert(m_entries_count > 0);
+ assert(idx < size_t(m_entries_count));
+ }
+
+ pNext = m_entries[idx];
+ assert(pNext);
+
+ return 0;
+}
+
+long Cluster::GetEntryCount() const { return m_entries_count; }
+
+const BlockEntry* Cluster::GetEntry(const Track* pTrack,
+ long long time_ns) const {
+ assert(pTrack);
+
+ if (m_pSegment == NULL) // this is the special EOS cluster
+ return pTrack->GetEOS();
+
+ const BlockEntry* pResult = pTrack->GetEOS();
+
+ long index = 0;
+
+ for (;;) {
+ if (index >= m_entries_count) {
+ long long pos;
+ long len;
+
+ const long status = Parse(pos, len);
+ assert(status >= 0);
+
+ if (status > 0) // completely parsed, and no more entries
+ return pResult;
+
+ if (status < 0) // should never happen
+ return 0;
+
+ assert(m_entries);
+ assert(index < m_entries_count);
+ }
+
+ const BlockEntry* const pEntry = m_entries[index];
+ assert(pEntry);
+ assert(!pEntry->EOS());
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ if (pBlock->GetTrackNumber() != pTrack->GetNumber()) {
+ ++index;
+ continue;
+ }
+
+ if (pTrack->VetEntry(pEntry)) {
+ if (time_ns < 0) // just want first candidate block
+ return pEntry;
+
+ const long long ns = pBlock->GetTime(this);
+
+ if (ns > time_ns)
+ return pResult;
+
+ pResult = pEntry; // have a candidate
+ } else if (time_ns >= 0) {
+ const long long ns = pBlock->GetTime(this);
+
+ if (ns > time_ns)
+ return pResult;
+ }
+
+ ++index;
+ }
+}
+
+const BlockEntry* Cluster::GetEntry(const CuePoint& cp,
+ const CuePoint::TrackPosition& tp) const {
+ assert(m_pSegment);
+ const long long tc = cp.GetTimeCode();
+
+ if (tp.m_block > 0) {
+ const long block = static_cast<long>(tp.m_block);
+ const long index = block - 1;
+
+ while (index >= m_entries_count) {
+ long long pos;
+ long len;
+
+ const long status = Parse(pos, len);
+
+ if (status < 0) // TODO: can this happen?
+ return NULL;
+
+ if (status > 0) // nothing remains to be parsed
+ return NULL;
+ }
+
+ const BlockEntry* const pEntry = m_entries[index];
+ assert(pEntry);
+ assert(!pEntry->EOS());
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ if ((pBlock->GetTrackNumber() == tp.m_track) &&
+ (pBlock->GetTimeCode(this) == tc)) {
+ return pEntry;
+ }
+ }
+
+ long index = 0;
+
+ for (;;) {
+ if (index >= m_entries_count) {
+ long long pos;
+ long len;
+
+ const long status = Parse(pos, len);
+
+ if (status < 0) // TODO: can this happen?
+ return NULL;
+
+ if (status > 0) // nothing remains to be parsed
+ return NULL;
+
+ assert(m_entries);
+ assert(index < m_entries_count);
+ }
+
+ const BlockEntry* const pEntry = m_entries[index];
+ assert(pEntry);
+ assert(!pEntry->EOS());
+
+ const Block* const pBlock = pEntry->GetBlock();
+ assert(pBlock);
+
+ if (pBlock->GetTrackNumber() != tp.m_track) {
+ ++index;
+ continue;
+ }
+
+ const long long tc_ = pBlock->GetTimeCode(this);
+
+ if (tc_ < tc) {
+ ++index;
+ continue;
+ }
+
+ if (tc_ > tc)
+ return NULL;
+
+ const Tracks* const pTracks = m_pSegment->GetTracks();
+ assert(pTracks);
+
+ const long tn = static_cast<long>(tp.m_track);
+ const Track* const pTrack = pTracks->GetTrackByNumber(tn);
+
+ if (pTrack == NULL)
+ return NULL;
+
+ const long long type = pTrack->GetType();
+
+ if (type == 2) // audio
+ return pEntry;
+
+ if (type != 1) // not video
+ return NULL;
+
+ if (!pBlock->IsKey())
+ return NULL;
+
+ return pEntry;
+ }
+}
+
+BlockEntry::BlockEntry(Cluster* p, long idx) : m_pCluster(p), m_index(idx) {}
+BlockEntry::~BlockEntry() {}
+const Cluster* BlockEntry::GetCluster() const { return m_pCluster; }
+long BlockEntry::GetIndex() const { return m_index; }
+
+SimpleBlock::SimpleBlock(Cluster* pCluster, long idx, long long start,
+ long long size)
+ : BlockEntry(pCluster, idx), m_block(start, size, 0) {}
+
+long SimpleBlock::Parse() { return m_block.Parse(m_pCluster); }
+BlockEntry::Kind SimpleBlock::GetKind() const { return kBlockSimple; }
+const Block* SimpleBlock::GetBlock() const { return &m_block; }
+
+BlockGroup::BlockGroup(Cluster* pCluster, long idx, long long block_start,
+ long long block_size, long long prev, long long next,
+ long long duration, long long discard_padding)
+ : BlockEntry(pCluster, idx),
+ m_block(block_start, block_size, discard_padding),
+ m_prev(prev),
+ m_next(next),
+ m_duration(duration) {}
+
+long BlockGroup::Parse() {
+ const long status = m_block.Parse(m_pCluster);
+
+ if (status)
+ return status;
+
+ m_block.SetKey((m_prev > 0) && (m_next <= 0));
+
+ return 0;
+}
+
+BlockEntry::Kind BlockGroup::GetKind() const { return kBlockGroup; }
+const Block* BlockGroup::GetBlock() const { return &m_block; }
+long long BlockGroup::GetPrevTimeCode() const { return m_prev; }
+long long BlockGroup::GetNextTimeCode() const { return m_next; }
+long long BlockGroup::GetDurationTimeCode() const { return m_duration; }
+
+Block::Block(long long start, long long size_, long long discard_padding)
+ : m_start(start),
+ m_size(size_),
+ m_track(0),
+ m_timecode(-1),
+ m_flags(0),
+ m_frames(NULL),
+ m_frame_count(-1),
+ m_discard_padding(discard_padding) {}
+
+Block::~Block() { delete[] m_frames; }
+
+long Block::Parse(const Cluster* pCluster) {
+ if (pCluster == NULL)
+ return -1;
+
+ if (pCluster->m_pSegment == NULL)
+ return -1;
+
+ assert(m_start >= 0);
+ assert(m_size >= 0);
+ assert(m_track <= 0);
+ assert(m_frames == NULL);
+ assert(m_frame_count <= 0);
+
+ long long pos = m_start;
+ const long long stop = m_start + m_size;
+
+ long len;
+
+ IMkvReader* const pReader = pCluster->m_pSegment->m_pReader;
+
+ m_track = ReadUInt(pReader, pos, len);
+
+ if (m_track <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume track number
+
+ if ((stop - pos) < 2)
+ return E_FILE_FORMAT_INVALID;
+
+ long status;
+ long long value;
+
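+  // The block's timecode is a signed 16-bit value, relative to the
+  // timecode of the enclosing cluster (see Block::GetTimeCode).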
+ status = UnserializeInt(pReader, pos, 2, value);
+
+ if (status)
+ return E_FILE_FORMAT_INVALID;
+
+ if (value < SHRT_MIN)
+ return E_FILE_FORMAT_INVALID;
+
+ if (value > SHRT_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ m_timecode = static_cast<short>(value);
+
+ pos += 2;
+
+ if ((stop - pos) <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ status = pReader->Read(pos, 1, &m_flags);
+
+ if (status)
+ return E_FILE_FORMAT_INVALID;
+
+ const int lacing = int(m_flags & 0x06) >> 1;
+
+ ++pos; // consume flags byte
+
+ if (lacing == 0) { // no lacing
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ m_frame_count = 1;
+ m_frames = new (std::nothrow) Frame[m_frame_count];
+ if (m_frames == NULL)
+ return -1;
+
+ Frame& f = m_frames[0];
+ f.pos = pos;
+
+ const long long frame_size = stop - pos;
+
+ if (frame_size > LONG_MAX || frame_size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ f.len = static_cast<long>(frame_size);
+
+ return 0; // success
+ }
+
+ if (pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ unsigned char biased_count;
+
+ status = pReader->Read(pos, 1, &biased_count);
+
+ if (status)
+ return E_FILE_FORMAT_INVALID;
+
+ ++pos; // consume frame count
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ m_frame_count = int(biased_count) + 1;
+
+ m_frames = new (std::nothrow) Frame[m_frame_count];
+ if (m_frames == NULL)
+ return -1;
+
+ if (!m_frames)
+ return E_FILE_FORMAT_INVALID;
+
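+  // Xiph lacing stores the size of each of the first (frame_count - 1)
+  // frames as a run of 0xFF bytes terminated by a byte < 0xFF (e.g. a
+  // 300-byte frame is coded as 0xFF 0x2D); the last frame's size is
+  // whatever remains of the block payload.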
+ if (lacing == 1) { // Xiph
+ Frame* pf = m_frames;
+ Frame* const pf_end = pf + m_frame_count;
+
+ long long size = 0;
+ int frame_count = m_frame_count;
+
+ while (frame_count > 1) {
+ long frame_size = 0;
+
+ for (;;) {
+ unsigned char val;
+
+ if (pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ status = pReader->Read(pos, 1, &val);
+
+ if (status)
+ return E_FILE_FORMAT_INVALID;
+
+ ++pos; // consume xiph size byte
+
+ frame_size += val;
+
+ if (val < 255)
+ break;
+ }
+
+ Frame& f = *pf++;
+ assert(pf < pf_end);
+ if (pf >= pf_end)
+ return E_FILE_FORMAT_INVALID;
+
+ f.pos = 0; // patch later
+
+ if (frame_size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ f.len = frame_size;
+ size += frame_size; // contribution of this frame
+
+ --frame_count;
+ }
+
+ if (pf >= pf_end || pos > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ {
+ Frame& f = *pf++;
+
+ if (pf != pf_end)
+ return E_FILE_FORMAT_INVALID;
+
+ f.pos = 0; // patch later
+
+ const long long total_size = stop - pos;
+
+ if (total_size < size)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long frame_size = total_size - size;
+
+ if (frame_size > LONG_MAX || frame_size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ f.len = static_cast<long>(frame_size);
+ }
+
+ pf = m_frames;
+ while (pf != pf_end) {
+ Frame& f = *pf++;
+ assert((pos + f.len) <= stop);
+
+ if ((pos + f.len) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ f.pos = pos;
+ pos += f.len;
+ }
+
+ assert(pos == stop);
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ } else if (lacing == 2) { // fixed-size lacing
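+    // Fixed-size lacing stores no size table at all: the remaining
+    // payload must divide evenly into m_frame_count equal frames.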
+ if (pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long total_size = stop - pos;
+
+ if ((total_size % m_frame_count) != 0)
+ return E_FILE_FORMAT_INVALID;
+
+ const long long frame_size = total_size / m_frame_count;
+
+ if (frame_size > LONG_MAX || frame_size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ Frame* pf = m_frames;
+ Frame* const pf_end = pf + m_frame_count;
+
+ while (pf != pf_end) {
+ assert((pos + frame_size) <= stop);
+ if ((pos + frame_size) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ Frame& f = *pf++;
+
+ f.pos = pos;
+ f.len = static_cast<long>(frame_size);
+
+ pos += frame_size;
+ }
+
+ assert(pos == stop);
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+
+ } else {
+ assert(lacing == 3); // EBML lacing
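+    // EBML lacing codes the first frame's size as an ordinary EBML
+    // unsigned integer; each subsequent size is a signed delta from the
+    // previous frame's size, stored unsigned and biased by
+    // (1 << (7 * len - 1)) - 1. The last frame's size is implied by the
+    // remaining payload.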
+
+ if (pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ long long size = 0;
+ int frame_count = m_frame_count;
+
+ long long frame_size = ReadUInt(pReader, pos, len);
+
+ if (frame_size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (frame_size > LONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume length of size of first frame
+
+ if ((pos + frame_size) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ Frame* pf = m_frames;
+ Frame* const pf_end = pf + m_frame_count;
+
+ {
+ Frame& curr = *pf;
+
+ curr.pos = 0; // patch later
+
+ curr.len = static_cast<long>(frame_size);
+ size += curr.len; // contribution of this frame
+ }
+
+ --frame_count;
+
+ while (frame_count > 1) {
+ if (pos >= stop)
+ return E_FILE_FORMAT_INVALID;
+
+ assert(pf < pf_end);
+ if (pf >= pf_end)
+ return E_FILE_FORMAT_INVALID;
+
+ const Frame& prev = *pf++;
+ assert(prev.len == frame_size);
+ if (prev.len != frame_size)
+ return E_FILE_FORMAT_INVALID;
+
+ assert(pf < pf_end);
+ if (pf >= pf_end)
+ return E_FILE_FORMAT_INVALID;
+
+ Frame& curr = *pf;
+
+ curr.pos = 0; // patch later
+
+ const long long delta_size_ = ReadUInt(pReader, pos, len);
+
+ if (delta_size_ < 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if ((pos + len) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ pos += len; // consume length of (delta) size
+ if (pos > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ const long exp = 7 * len - 1;
+ const long long bias = (1LL << exp) - 1LL;
+ const long long delta_size = delta_size_ - bias;
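+      // e.g. with len == 1, exp == 6 and bias == 63, so a stored value
+      // of 60 denotes a delta of -3 from the previous frame's size.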
+
+ frame_size += delta_size;
+
+ if (frame_size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ if (frame_size > LONG_MAX)
+ return E_FILE_FORMAT_INVALID;
+
+ curr.len = static_cast<long>(frame_size);
+ // Check if size + curr.len could overflow.
+ if (size > LLONG_MAX - curr.len) {
+ return E_FILE_FORMAT_INVALID;
+ }
+ size += curr.len; // contribution of this frame
+
+ --frame_count;
+ }
+
+ // parse last frame
+ if (frame_count > 0) {
+ if (pos > stop || pf >= pf_end)
+ return E_FILE_FORMAT_INVALID;
+
+ const Frame& prev = *pf++;
+ assert(prev.len == frame_size);
+ if (prev.len != frame_size)
+ return E_FILE_FORMAT_INVALID;
+
+ if (pf >= pf_end)
+ return E_FILE_FORMAT_INVALID;
+
+ Frame& curr = *pf++;
+ if (pf != pf_end)
+ return E_FILE_FORMAT_INVALID;
+
+ curr.pos = 0; // patch later
+
+ const long long total_size = stop - pos;
+
+ if (total_size < size)
+ return E_FILE_FORMAT_INVALID;
+
+ frame_size = total_size - size;
+
+ if (frame_size > LONG_MAX || frame_size <= 0)
+ return E_FILE_FORMAT_INVALID;
+
+ curr.len = static_cast<long>(frame_size);
+ }
+
+ pf = m_frames;
+ while (pf != pf_end) {
+ Frame& f = *pf++;
+ if ((pos + f.len) > stop)
+ return E_FILE_FORMAT_INVALID;
+
+ f.pos = pos;
+ pos += f.len;
+ }
+
+ if (pos != stop)
+ return E_FILE_FORMAT_INVALID;
+ }
+
+ return 0; // success
+}
+
+long long Block::GetTimeCode(const Cluster* pCluster) const {
+ if (pCluster == 0)
+ return m_timecode;
+
+ const long long tc0 = pCluster->GetTimeCode();
+ assert(tc0 >= 0);
+
+ // Check if tc0 + m_timecode would overflow.
+ if (tc0 < 0 || LLONG_MAX - tc0 < m_timecode) {
+ return -1;
+ }
+
+ const long long tc = tc0 + m_timecode;
+
+ return tc; // unscaled timecode units
+}
+
+long long Block::GetTime(const Cluster* pCluster) const {
+ assert(pCluster);
+
+ const long long tc = GetTimeCode(pCluster);
+
+ const Segment* const pSegment = pCluster->m_pSegment;
+ const SegmentInfo* const pInfo = pSegment->GetInfo();
+ assert(pInfo);
+
+ const long long scale = pInfo->GetTimeCodeScale();
+ assert(scale >= 1);
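+  // Multiplying the unitless timecode by TimecodeScale (1000000 by
+  // default) yields nanoseconds; e.g. timecode 40 at the default scale
+  // is 40,000,000 ns, i.e. 40ms.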
+
+ // Check if tc * scale could overflow.
+ if (tc != 0 && scale > LLONG_MAX / tc) {
+ return -1;
+ }
+ const long long ns = tc * scale;
+
+ return ns;
+}
+
+long long Block::GetTrackNumber() const { return m_track; }
+
+bool Block::IsKey() const {
+ return ((m_flags & static_cast<unsigned char>(1 << 7)) != 0);
+}
+
+void Block::SetKey(bool bKey) {
+ if (bKey)
+ m_flags |= static_cast<unsigned char>(1 << 7);
+ else
+ m_flags &= 0x7F;
+}
+
+bool Block::IsInvisible() const { return bool(int(m_flags & 0x08) != 0); }
+
+Block::Lacing Block::GetLacing() const {
+ const int value = int(m_flags & 0x06) >> 1;
+ return static_cast<Lacing>(value);
+}
+
+int Block::GetFrameCount() const { return m_frame_count; }
+
+const Block::Frame& Block::GetFrame(int idx) const {
+ assert(idx >= 0);
+ assert(idx < m_frame_count);
+
+ const Frame& f = m_frames[idx];
+ assert(f.pos > 0);
+ assert(f.len > 0);
+
+ return f;
+}
+
+long Block::Frame::Read(IMkvReader* pReader, unsigned char* buf) const {
+ assert(pReader);
+ assert(buf);
+
+ const long status = pReader->Read(pos, len, buf);
+ return status;
+}
+
+long long Block::GetDiscardPadding() const { return m_discard_padding; }
+
+} // namespace mkvparser
diff --git a/media/libaom/src/third_party/libwebm/mkvparser/mkvparser.h b/media/libaom/src/third_party/libwebm/mkvparser/mkvparser.h
new file mode 100644
index 000000000..26c2b7e5e
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvparser/mkvparser.h
@@ -0,0 +1,1145 @@
+// Copyright (c) 2012 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#ifndef MKVPARSER_MKVPARSER_H_
+#define MKVPARSER_MKVPARSER_H_
+
+#include <cstddef>
+
+namespace mkvparser {
+
+const int E_PARSE_FAILED = -1;
+const int E_FILE_FORMAT_INVALID = -2;
+const int E_BUFFER_NOT_FULL = -3;
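+// E_BUFFER_NOT_FULL is not a hard failure: it signals that the caller
+// must make more of the stream available (so that IMkvReader::Length
+// reports a larger |available|) before retrying the call.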
+
+class IMkvReader {
+ public:
+ virtual int Read(long long pos, long len, unsigned char* buf) = 0;
+ virtual int Length(long long* total, long long* available) = 0;
+
+ protected:
+ virtual ~IMkvReader();
+};
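+
+// A minimal in-memory IMkvReader might look like the following sketch
+// (illustrative only: the BufferReader class below is hypothetical and
+// not part of this library):
+//
+//   #include <cstring>
+//
+//   class BufferReader : public mkvparser::IMkvReader {
+//    public:
+//     BufferReader(const unsigned char* buf, long long size)
+//         : buf_(buf), size_(size) {}
+//     // Per the interface convention, return 0 on success and a
+//     // negative value on failure.
+//     virtual int Read(long long pos, long len, unsigned char* buf) {
+//       if (pos < 0 || len < 0 || pos + len > size_)
+//         return -1;
+//       std::memcpy(buf, buf_ + pos, static_cast<size_t>(len));
+//       return 0;
+//     }
+//     // Both byte counts are known up front for an in-memory buffer;
+//     // a streaming reader would grow |available| over time.
+//     virtual int Length(long long* total, long long* available) {
+//       *total = size_;
+//       *available = size_;
+//       return 0;
+//     }
+//    private:
+//     const unsigned char* const buf_;
+//     const long long size_;
+//   };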
+
+template <typename Type>
+Type* SafeArrayAlloc(unsigned long long num_elements,
+ unsigned long long element_size);
+long long GetUIntLength(IMkvReader*, long long, long&);
+long long ReadUInt(IMkvReader*, long long, long&);
+long long ReadID(IMkvReader* pReader, long long pos, long& len);
+long long UnserializeUInt(IMkvReader*, long long pos, long long size);
+
+long UnserializeFloat(IMkvReader*, long long pos, long long size, double&);
+long UnserializeInt(IMkvReader*, long long pos, long long size,
+ long long& result);
+
+long UnserializeString(IMkvReader*, long long pos, long long size, char*& str);
+
+long ParseElementHeader(IMkvReader* pReader,
+ long long& pos, // consume id and size fields
+ long long stop, // if you know size of element's parent
+ long long& id, long long& size);
+
+bool Match(IMkvReader*, long long&, unsigned long, long long&);
+bool Match(IMkvReader*, long long&, unsigned long, unsigned char*&, size_t&);
+
+void GetVersion(int& major, int& minor, int& build, int& revision);
+
+struct EBMLHeader {
+ EBMLHeader();
+ ~EBMLHeader();
+ long long m_version;
+ long long m_readVersion;
+ long long m_maxIdLength;
+ long long m_maxSizeLength;
+ char* m_docType;
+ long long m_docTypeVersion;
+ long long m_docTypeReadVersion;
+
+ long long Parse(IMkvReader*, long long&);
+ void Init();
+};
+
+class Segment;
+class Track;
+class Cluster;
+
+class Block {
+ Block(const Block&);
+ Block& operator=(const Block&);
+
+ public:
+ const long long m_start;
+ const long long m_size;
+
+ Block(long long start, long long size, long long discard_padding);
+ ~Block();
+
+ long Parse(const Cluster*);
+
+ long long GetTrackNumber() const;
+ long long GetTimeCode(const Cluster*) const; // absolute, but not scaled
+ long long GetTime(const Cluster*) const; // absolute, and scaled (ns)
+ bool IsKey() const;
+ void SetKey(bool);
+ bool IsInvisible() const;
+
+ enum Lacing { kLacingNone, kLacingXiph, kLacingFixed, kLacingEbml };
+ Lacing GetLacing() const;
+
+ int GetFrameCount() const; // to index frames: [0, count)
+
+ struct Frame {
+ long long pos; // absolute offset
+ long len;
+
+ long Read(IMkvReader*, unsigned char*) const;
+ };
+
+ const Frame& GetFrame(int frame_index) const;
+
+ long long GetDiscardPadding() const;
+
+ private:
+ long long m_track; // Track::Number()
+ short m_timecode; // relative to cluster
+ unsigned char m_flags;
+
+ Frame* m_frames;
+ int m_frame_count;
+
+ protected:
+ const long long m_discard_padding;
+};
+
+class BlockEntry {
+ BlockEntry(const BlockEntry&);
+ BlockEntry& operator=(const BlockEntry&);
+
+ protected:
+ BlockEntry(Cluster*, long index);
+
+ public:
+ virtual ~BlockEntry();
+
+ bool EOS() const { return (GetKind() == kBlockEOS); }
+ const Cluster* GetCluster() const;
+ long GetIndex() const;
+ virtual const Block* GetBlock() const = 0;
+
+ enum Kind { kBlockEOS, kBlockSimple, kBlockGroup };
+ virtual Kind GetKind() const = 0;
+
+ protected:
+ Cluster* const m_pCluster;
+ const long m_index;
+};
+
+class SimpleBlock : public BlockEntry {
+ SimpleBlock(const SimpleBlock&);
+ SimpleBlock& operator=(const SimpleBlock&);
+
+ public:
+ SimpleBlock(Cluster*, long index, long long start, long long size);
+ long Parse();
+
+ Kind GetKind() const;
+ const Block* GetBlock() const;
+
+ protected:
+ Block m_block;
+};
+
+class BlockGroup : public BlockEntry {
+ BlockGroup(const BlockGroup&);
+ BlockGroup& operator=(const BlockGroup&);
+
+ public:
+ BlockGroup(Cluster*, long index,
+ long long block_start, // absolute pos of block's payload
+ long long block_size, // size of block's payload
+ long long prev, long long next, long long duration,
+ long long discard_padding);
+
+ long Parse();
+
+ Kind GetKind() const;
+ const Block* GetBlock() const;
+
+ long long GetPrevTimeCode() const; // relative to block's time
+ long long GetNextTimeCode() const; // as above
+ long long GetDurationTimeCode() const;
+
+ private:
+ Block m_block;
+ const long long m_prev;
+ const long long m_next;
+ const long long m_duration;
+};
+
+///////////////////////////////////////////////////////////////
+// ContentEncoding element
+// Elements used to describe whether the track data has been encrypted or
+// compressed with zlib or header stripping.
+class ContentEncoding {
+ public:
+ enum { kCTR = 1 };
+
+ ContentEncoding();
+ ~ContentEncoding();
+
+ // ContentCompression element names
+ struct ContentCompression {
+ ContentCompression();
+ ~ContentCompression();
+
+ unsigned long long algo;
+ unsigned char* settings;
+ long long settings_len;
+ };
+
+ // ContentEncAESSettings element names
+ struct ContentEncAESSettings {
+ ContentEncAESSettings() : cipher_mode(kCTR) {}
+ ~ContentEncAESSettings() {}
+
+ unsigned long long cipher_mode;
+ };
+
+ // ContentEncryption element names
+ struct ContentEncryption {
+ ContentEncryption();
+ ~ContentEncryption();
+
+ unsigned long long algo;
+ unsigned char* key_id;
+ long long key_id_len;
+ unsigned char* signature;
+ long long signature_len;
+ unsigned char* sig_key_id;
+ long long sig_key_id_len;
+ unsigned long long sig_algo;
+ unsigned long long sig_hash_algo;
+
+ ContentEncAESSettings aes_settings;
+ };
+
+ // Returns ContentCompression represented by |idx|. Returns NULL if |idx|
+ // is out of bounds.
+ const ContentCompression* GetCompressionByIndex(unsigned long idx) const;
+
+ // Returns number of ContentCompression elements in this ContentEncoding
+ // element.
+ unsigned long GetCompressionCount() const;
+
+ // Parses the ContentCompression element from |pReader|. |start| is the
+ // starting offset of the ContentCompression payload. |size| is the size in
+ // bytes of the ContentCompression payload. |compression| is where the parsed
+ // values will be stored.
+ long ParseCompressionEntry(long long start, long long size,
+ IMkvReader* pReader,
+ ContentCompression* compression);
+
+ // Returns ContentEncryption represented by |idx|. Returns NULL if |idx|
+ // is out of bounds.
+ const ContentEncryption* GetEncryptionByIndex(unsigned long idx) const;
+
+ // Returns number of ContentEncryption elements in this ContentEncoding
+ // element.
+ unsigned long GetEncryptionCount() const;
+
+ // Parses the ContentEncAESSettings element from |pReader|. |start| is the
+ // starting offset of the ContentEncAESSettings payload. |size| is the
+ // size in bytes of the ContentEncAESSettings payload. |encryption| is
+ // where the parsed values will be stored.
+ long ParseContentEncAESSettingsEntry(long long start, long long size,
+ IMkvReader* pReader,
+ ContentEncAESSettings* aes);
+
+  // Parses the ContentEncoding element from |pReader|. |start| is the
+  // starting offset of the ContentEncoding payload. |size| is the size in
+  // bytes of the ContentEncoding payload. Returns 0 on success.
+ long ParseContentEncodingEntry(long long start, long long size,
+ IMkvReader* pReader);
+
+ // Parses the ContentEncryption element from |pReader|. |start| is the
+ // starting offset of the ContentEncryption payload. |size| is the size in
+ // bytes of the ContentEncryption payload. |encryption| is where the parsed
+ // values will be stored.
+ long ParseEncryptionEntry(long long start, long long size,
+ IMkvReader* pReader, ContentEncryption* encryption);
+
+ unsigned long long encoding_order() const { return encoding_order_; }
+ unsigned long long encoding_scope() const { return encoding_scope_; }
+ unsigned long long encoding_type() const { return encoding_type_; }
+
+ private:
+ // Member variables for list of ContentCompression elements.
+ ContentCompression** compression_entries_;
+ ContentCompression** compression_entries_end_;
+
+ // Member variables for list of ContentEncryption elements.
+ ContentEncryption** encryption_entries_;
+ ContentEncryption** encryption_entries_end_;
+
+ // ContentEncoding element names
+ unsigned long long encoding_order_;
+ unsigned long long encoding_scope_;
+ unsigned long long encoding_type_;
+
+ // LIBWEBM_DISALLOW_COPY_AND_ASSIGN(ContentEncoding);
+ ContentEncoding(const ContentEncoding&);
+ ContentEncoding& operator=(const ContentEncoding&);
+};
+
+class Track {
+ Track(const Track&);
+ Track& operator=(const Track&);
+
+ public:
+ class Info;
+ static long Create(Segment*, const Info&, long long element_start,
+ long long element_size, Track*&);
+
+ enum Type { kVideo = 1, kAudio = 2, kSubtitle = 0x11, kMetadata = 0x21 };
+
+ Segment* const m_pSegment;
+ const long long m_element_start;
+ const long long m_element_size;
+ virtual ~Track();
+
+ long GetType() const;
+ long GetNumber() const;
+ unsigned long long GetUid() const;
+ const char* GetNameAsUTF8() const;
+ const char* GetLanguage() const;
+ const char* GetCodecNameAsUTF8() const;
+ const char* GetCodecId() const;
+ const unsigned char* GetCodecPrivate(size_t&) const;
+ bool GetLacing() const;
+ unsigned long long GetDefaultDuration() const;
+ unsigned long long GetCodecDelay() const;
+ unsigned long long GetSeekPreRoll() const;
+
+ const BlockEntry* GetEOS() const;
+
+ struct Settings {
+ long long start;
+ long long size;
+ };
+
+ class Info {
+ public:
+ Info();
+ ~Info();
+ int Copy(Info&) const;
+ void Clear();
+ long type;
+ long number;
+ unsigned long long uid;
+ unsigned long long defaultDuration;
+ unsigned long long codecDelay;
+ unsigned long long seekPreRoll;
+ char* nameAsUTF8;
+ char* language;
+ char* codecId;
+ char* codecNameAsUTF8;
+ unsigned char* codecPrivate;
+ size_t codecPrivateSize;
+ bool lacing;
+ Settings settings;
+
+ private:
+ Info(const Info&);
+ Info& operator=(const Info&);
+ int CopyStr(char* Info::*str, Info&) const;
+ };
+
+ long GetFirst(const BlockEntry*&) const;
+ long GetNext(const BlockEntry* pCurr, const BlockEntry*& pNext) const;
+ virtual bool VetEntry(const BlockEntry*) const;
+ virtual long Seek(long long time_ns, const BlockEntry*&) const;
+
+ const ContentEncoding* GetContentEncodingByIndex(unsigned long idx) const;
+ unsigned long GetContentEncodingCount() const;
+
+ long ParseContentEncodingsEntry(long long start, long long size);
+
+ protected:
+ Track(Segment*, long long element_start, long long element_size);
+
+ Info m_info;
+
+ class EOSBlock : public BlockEntry {
+ public:
+ EOSBlock();
+
+ Kind GetKind() const;
+ const Block* GetBlock() const;
+ };
+
+ EOSBlock m_eos;
+
+ private:
+ ContentEncoding** content_encoding_entries_;
+ ContentEncoding** content_encoding_entries_end_;
+};
+
+struct PrimaryChromaticity {
+ PrimaryChromaticity() : x(0), y(0) {}
+ ~PrimaryChromaticity() {}
+ static bool Parse(IMkvReader* reader, long long read_pos,
+ long long value_size, bool is_x,
+ PrimaryChromaticity** chromaticity);
+ float x;
+ float y;
+};
+
+struct MasteringMetadata {
+ static const float kValueNotPresent;
+
+ MasteringMetadata()
+ : r(NULL),
+ g(NULL),
+ b(NULL),
+ white_point(NULL),
+ luminance_max(kValueNotPresent),
+ luminance_min(kValueNotPresent) {}
+ ~MasteringMetadata() {
+ delete r;
+ delete g;
+ delete b;
+ delete white_point;
+ }
+
+ static bool Parse(IMkvReader* reader, long long element_start,
+ long long element_size,
+ MasteringMetadata** mastering_metadata);
+
+ PrimaryChromaticity* r;
+ PrimaryChromaticity* g;
+ PrimaryChromaticity* b;
+ PrimaryChromaticity* white_point;
+ float luminance_max;
+ float luminance_min;
+};
+
+struct Colour {
+ static const long long kValueNotPresent;
+
+ // Unless otherwise noted all values assigned upon construction are the
+ // equivalent of unspecified/default.
+ Colour()
+ : matrix_coefficients(kValueNotPresent),
+ bits_per_channel(kValueNotPresent),
+ chroma_subsampling_horz(kValueNotPresent),
+ chroma_subsampling_vert(kValueNotPresent),
+ cb_subsampling_horz(kValueNotPresent),
+ cb_subsampling_vert(kValueNotPresent),
+ chroma_siting_horz(kValueNotPresent),
+ chroma_siting_vert(kValueNotPresent),
+ range(kValueNotPresent),
+ transfer_characteristics(kValueNotPresent),
+ primaries(kValueNotPresent),
+ max_cll(kValueNotPresent),
+ max_fall(kValueNotPresent),
+ mastering_metadata(NULL) {}
+ ~Colour() {
+ delete mastering_metadata;
+ mastering_metadata = NULL;
+ }
+
+ static bool Parse(IMkvReader* reader, long long element_start,
+ long long element_size, Colour** colour);
+
+ long long matrix_coefficients;
+ long long bits_per_channel;
+ long long chroma_subsampling_horz;
+ long long chroma_subsampling_vert;
+ long long cb_subsampling_horz;
+ long long cb_subsampling_vert;
+ long long chroma_siting_horz;
+ long long chroma_siting_vert;
+ long long range;
+ long long transfer_characteristics;
+ long long primaries;
+ long long max_cll;
+ long long max_fall;
+
+ MasteringMetadata* mastering_metadata;
+};
+
+struct Projection {
+ enum ProjectionType {
+ kTypeNotPresent = -1,
+ kRectangular = 0,
+ kEquirectangular = 1,
+ kCubeMap = 2,
+ kMesh = 3,
+ };
+ static const float kValueNotPresent;
+ Projection()
+ : type(kTypeNotPresent),
+ private_data(NULL),
+ private_data_length(0),
+ pose_yaw(kValueNotPresent),
+ pose_pitch(kValueNotPresent),
+ pose_roll(kValueNotPresent) {}
+ ~Projection() { delete[] private_data; }
+ static bool Parse(IMkvReader* reader, long long element_start,
+ long long element_size, Projection** projection);
+
+ ProjectionType type;
+ unsigned char* private_data;
+ size_t private_data_length;
+ float pose_yaw;
+ float pose_pitch;
+ float pose_roll;
+};
+
+class VideoTrack : public Track {
+ VideoTrack(const VideoTrack&);
+ VideoTrack& operator=(const VideoTrack&);
+
+ VideoTrack(Segment*, long long element_start, long long element_size);
+
+ public:
+ virtual ~VideoTrack();
+ static long Parse(Segment*, const Info&, long long element_start,
+ long long element_size, VideoTrack*&);
+
+ long long GetWidth() const;
+ long long GetHeight() const;
+ long long GetDisplayWidth() const;
+ long long GetDisplayHeight() const;
+ long long GetDisplayUnit() const;
+ long long GetStereoMode() const;
+ double GetFrameRate() const;
+
+ bool VetEntry(const BlockEntry*) const;
+ long Seek(long long time_ns, const BlockEntry*&) const;
+
+ Colour* GetColour() const;
+
+ Projection* GetProjection() const;
+
+ private:
+ long long m_width;
+ long long m_height;
+ long long m_display_width;
+ long long m_display_height;
+ long long m_display_unit;
+ long long m_stereo_mode;
+
+ double m_rate;
+
+ Colour* m_colour;
+ Projection* m_projection;
+};
+
+class AudioTrack : public Track {
+ AudioTrack(const AudioTrack&);
+ AudioTrack& operator=(const AudioTrack&);
+
+ AudioTrack(Segment*, long long element_start, long long element_size);
+
+ public:
+ static long Parse(Segment*, const Info&, long long element_start,
+ long long element_size, AudioTrack*&);
+
+ double GetSamplingRate() const;
+ long long GetChannels() const;
+ long long GetBitDepth() const;
+
+ private:
+ double m_rate;
+ long long m_channels;
+ long long m_bitDepth;
+};
+
+class Tracks {
+ Tracks(const Tracks&);
+ Tracks& operator=(const Tracks&);
+
+ public:
+ Segment* const m_pSegment;
+ const long long m_start;
+ const long long m_size;
+ const long long m_element_start;
+ const long long m_element_size;
+
+ Tracks(Segment*, long long start, long long size, long long element_start,
+ long long element_size);
+
+ ~Tracks();
+
+ long Parse();
+
+ unsigned long GetTracksCount() const;
+
+ const Track* GetTrackByNumber(long tn) const;
+ const Track* GetTrackByIndex(unsigned long idx) const;
+
+ private:
+ Track** m_trackEntries;
+ Track** m_trackEntriesEnd;
+
+ long ParseTrackEntry(long long payload_start, long long payload_size,
+ long long element_start, long long element_size,
+ Track*&) const;
+};
+
+class Chapters {
+ Chapters(const Chapters&);
+ Chapters& operator=(const Chapters&);
+
+ public:
+ Segment* const m_pSegment;
+ const long long m_start;
+ const long long m_size;
+ const long long m_element_start;
+ const long long m_element_size;
+
+ Chapters(Segment*, long long payload_start, long long payload_size,
+ long long element_start, long long element_size);
+
+ ~Chapters();
+
+ long Parse();
+
+ class Atom;
+ class Edition;
+
+ class Display {
+ friend class Atom;
+ Display();
+ Display(const Display&);
+ ~Display();
+ Display& operator=(const Display&);
+
+ public:
+ const char* GetString() const;
+ const char* GetLanguage() const;
+ const char* GetCountry() const;
+
+ private:
+ void Init();
+ void ShallowCopy(Display&) const;
+ void Clear();
+ long Parse(IMkvReader*, long long pos, long long size);
+
+ char* m_string;
+ char* m_language;
+ char* m_country;
+ };
+
+ class Atom {
+ friend class Edition;
+ Atom();
+ Atom(const Atom&);
+ ~Atom();
+ Atom& operator=(const Atom&);
+
+ public:
+ unsigned long long GetUID() const;
+ const char* GetStringUID() const;
+
+ long long GetStartTimecode() const;
+ long long GetStopTimecode() const;
+
+ long long GetStartTime(const Chapters*) const;
+ long long GetStopTime(const Chapters*) const;
+
+ int GetDisplayCount() const;
+ const Display* GetDisplay(int index) const;
+
+ private:
+ void Init();
+ void ShallowCopy(Atom&) const;
+ void Clear();
+ long Parse(IMkvReader*, long long pos, long long size);
+ static long long GetTime(const Chapters*, long long timecode);
+
+ long ParseDisplay(IMkvReader*, long long pos, long long size);
+ bool ExpandDisplaysArray();
+
+ char* m_string_uid;
+ unsigned long long m_uid;
+ long long m_start_timecode;
+ long long m_stop_timecode;
+
+ Display* m_displays;
+ int m_displays_size;
+ int m_displays_count;
+ };
+
+ class Edition {
+ friend class Chapters;
+ Edition();
+ Edition(const Edition&);
+ ~Edition();
+ Edition& operator=(const Edition&);
+
+ public:
+ int GetAtomCount() const;
+ const Atom* GetAtom(int index) const;
+
+ private:
+ void Init();
+ void ShallowCopy(Edition&) const;
+ void Clear();
+ long Parse(IMkvReader*, long long pos, long long size);
+
+ long ParseAtom(IMkvReader*, long long pos, long long size);
+ bool ExpandAtomsArray();
+
+ Atom* m_atoms;
+ int m_atoms_size;
+ int m_atoms_count;
+ };
+
+ int GetEditionCount() const;
+ const Edition* GetEdition(int index) const;
+
+ private:
+ long ParseEdition(long long pos, long long size);
+ bool ExpandEditionsArray();
+
+ Edition* m_editions;
+ int m_editions_size;
+ int m_editions_count;
+};
+
+class Tags {
+ Tags(const Tags&);
+ Tags& operator=(const Tags&);
+
+ public:
+ Segment* const m_pSegment;
+ const long long m_start;
+ const long long m_size;
+ const long long m_element_start;
+ const long long m_element_size;
+
+ Tags(Segment*, long long payload_start, long long payload_size,
+ long long element_start, long long element_size);
+
+ ~Tags();
+
+ long Parse();
+
+ class Tag;
+ class SimpleTag;
+
+ class SimpleTag {
+ friend class Tag;
+ SimpleTag();
+ SimpleTag(const SimpleTag&);
+ ~SimpleTag();
+ SimpleTag& operator=(const SimpleTag&);
+
+ public:
+ const char* GetTagName() const;
+ const char* GetTagString() const;
+
+ private:
+ void Init();
+ void ShallowCopy(SimpleTag&) const;
+ void Clear();
+ long Parse(IMkvReader*, long long pos, long long size);
+
+ char* m_tag_name;
+ char* m_tag_string;
+ };
+
+ class Tag {
+ friend class Tags;
+ Tag();
+ Tag(const Tag&);
+ ~Tag();
+ Tag& operator=(const Tag&);
+
+ public:
+ int GetSimpleTagCount() const;
+ const SimpleTag* GetSimpleTag(int index) const;
+
+ private:
+ void Init();
+ void ShallowCopy(Tag&) const;
+ void Clear();
+ long Parse(IMkvReader*, long long pos, long long size);
+
+ long ParseSimpleTag(IMkvReader*, long long pos, long long size);
+ bool ExpandSimpleTagsArray();
+
+ SimpleTag* m_simple_tags;
+ int m_simple_tags_size;
+ int m_simple_tags_count;
+ };
+
+ int GetTagCount() const;
+ const Tag* GetTag(int index) const;
+
+ private:
+ long ParseTag(long long pos, long long size);
+ bool ExpandTagsArray();
+
+ Tag* m_tags;
+ int m_tags_size;
+ int m_tags_count;
+};
+
+class SegmentInfo {
+ SegmentInfo(const SegmentInfo&);
+ SegmentInfo& operator=(const SegmentInfo&);
+
+ public:
+ Segment* const m_pSegment;
+ const long long m_start;
+ const long long m_size;
+ const long long m_element_start;
+ const long long m_element_size;
+
+ SegmentInfo(Segment*, long long start, long long size,
+ long long element_start, long long element_size);
+
+ ~SegmentInfo();
+
+ long Parse();
+
+ long long GetTimeCodeScale() const;
+ long long GetDuration() const; // scaled
+ const char* GetMuxingAppAsUTF8() const;
+ const char* GetWritingAppAsUTF8() const;
+ const char* GetTitleAsUTF8() const;
+
+ private:
+ long long m_timecodeScale;
+ double m_duration;
+ char* m_pMuxingAppAsUTF8;
+ char* m_pWritingAppAsUTF8;
+ char* m_pTitleAsUTF8;
+};
+
+class SeekHead {
+ SeekHead(const SeekHead&);
+ SeekHead& operator=(const SeekHead&);
+
+ public:
+ Segment* const m_pSegment;
+ const long long m_start;
+ const long long m_size;
+ const long long m_element_start;
+ const long long m_element_size;
+
+ SeekHead(Segment*, long long start, long long size, long long element_start,
+ long long element_size);
+
+ ~SeekHead();
+
+ long Parse();
+
+ struct Entry {
+ Entry();
+
+ // the SeekHead entry payload
+ long long id;
+ long long pos;
+
+ // absolute pos of SeekEntry ID
+ long long element_start;
+
+ // SeekEntry ID size + size size + payload
+ long long element_size;
+ };
+
+ int GetCount() const;
+ const Entry* GetEntry(int idx) const;
+
+ struct VoidElement {
+ // absolute pos of Void ID
+ long long element_start;
+
+ // ID size + size size + payload size
+ long long element_size;
+ };
+
+ int GetVoidElementCount() const;
+ const VoidElement* GetVoidElement(int idx) const;
+
+ private:
+ Entry* m_entries;
+ int m_entry_count;
+
+ VoidElement* m_void_elements;
+ int m_void_element_count;
+
+ static bool ParseEntry(IMkvReader*,
+ long long pos, // payload
+ long long size, Entry*);
+};
+
+class Cues;
+class CuePoint {
+ friend class Cues;
+
+ CuePoint(long, long long);
+ ~CuePoint();
+
+ CuePoint(const CuePoint&);
+ CuePoint& operator=(const CuePoint&);
+
+ public:
+ long long m_element_start;
+ long long m_element_size;
+
+ bool Load(IMkvReader*);
+
+ long long GetTimeCode() const; // absolute but unscaled
+ long long GetTime(const Segment*) const; // absolute and scaled (ns units)
+
+ struct TrackPosition {
+ long long m_track;
+ long long m_pos; // of cluster
+ long long m_block;
+ // codec_state //defaults to 0
+ // reference = clusters containing req'd referenced blocks
+ // reftime = timecode of the referenced block
+
+ bool Parse(IMkvReader*, long long, long long);
+ };
+
+ const TrackPosition* Find(const Track*) const;
+
+ private:
+ const long m_index;
+ long long m_timecode;
+ TrackPosition* m_track_positions;
+ size_t m_track_positions_count;
+};
+
+class Cues {
+ friend class Segment;
+
+ Cues(Segment*, long long start, long long size, long long element_start,
+ long long element_size);
+ ~Cues();
+
+ Cues(const Cues&);
+ Cues& operator=(const Cues&);
+
+ public:
+ Segment* const m_pSegment;
+ const long long m_start;
+ const long long m_size;
+ const long long m_element_start;
+ const long long m_element_size;
+
+ bool Find( // lower bound of time_ns
+ long long time_ns, const Track*, const CuePoint*&,
+ const CuePoint::TrackPosition*&) const;
+
+ const CuePoint* GetFirst() const;
+ const CuePoint* GetLast() const;
+ const CuePoint* GetNext(const CuePoint*) const;
+
+ const BlockEntry* GetBlock(const CuePoint*,
+ const CuePoint::TrackPosition*) const;
+
+ bool LoadCuePoint() const;
+ long GetCount() const; // loaded only
+ // long GetTotal() const; //loaded + preloaded
+ bool DoneParsing() const;
+
+ private:
+ bool Init() const;
+ bool PreloadCuePoint(long&, long long) const;
+
+ mutable CuePoint** m_cue_points;
+ mutable long m_count;
+ mutable long m_preload_count;
+ mutable long long m_pos;
+};
+
+class Cluster {
+ friend class Segment;
+
+ Cluster(const Cluster&);
+ Cluster& operator=(const Cluster&);
+
+ public:
+ Segment* const m_pSegment;
+
+ public:
+ static Cluster* Create(Segment*,
+ long index, // index in segment
+ long long off); // offset relative to segment
+ // long long element_size);
+
+ Cluster(); // EndOfStream
+ ~Cluster();
+
+ bool EOS() const;
+
+ long long GetTimeCode() const; // absolute, but not scaled
+ long long GetTime() const; // absolute, and scaled (nanosecond units)
+ long long GetFirstTime() const; // time (ns) of first (earliest) block
+ long long GetLastTime() const; // time (ns) of last (latest) block
+
+ long GetFirst(const BlockEntry*&) const;
+ long GetLast(const BlockEntry*&) const;
+ long GetNext(const BlockEntry* curr, const BlockEntry*& next) const;
+
+ const BlockEntry* GetEntry(const Track*, long long ns = -1) const;
+ const BlockEntry* GetEntry(const CuePoint&,
+ const CuePoint::TrackPosition&) const;
+ // const BlockEntry* GetMaxKey(const VideoTrack*) const;
+
+ // static bool HasBlockEntries(const Segment*, long long);
+
+ static long HasBlockEntries(const Segment*, long long idoff, long long& pos,
+ long& size);
+
+ long GetEntryCount() const;
+
+ long Load(long long& pos, long& size) const;
+
+ long Parse(long long& pos, long& size) const;
+ long GetEntry(long index, const mkvparser::BlockEntry*&) const;
+
+ protected:
+ Cluster(Segment*, long index, long long element_start);
+ // long long element_size);
+
+ public:
+ const long long m_element_start;
+ long long GetPosition() const; // offset relative to segment
+
+ long GetIndex() const;
+ long long GetElementSize() const;
+ // long long GetPayloadSize() const;
+
+ // long long Unparsed() const;
+
+ private:
+ long m_index;
+ mutable long long m_pos;
+ // mutable long long m_size;
+ mutable long long m_element_size;
+ mutable long long m_timecode;
+ mutable BlockEntry** m_entries;
+ mutable long m_entries_size;
+ mutable long m_entries_count;
+
+ long ParseSimpleBlock(long long, long long&, long&);
+ long ParseBlockGroup(long long, long long&, long&);
+
+ long CreateBlock(long long id, long long pos, long long size,
+ long long discard_padding);
+ long CreateBlockGroup(long long start_offset, long long size,
+ long long discard_padding);
+ long CreateSimpleBlock(long long, long long);
+};
+
+class Segment {
+ friend class Cues;
+ friend class Track;
+ friend class VideoTrack;
+
+ Segment(const Segment&);
+ Segment& operator=(const Segment&);
+
+ private:
+ Segment(IMkvReader*, long long elem_start,
+ // long long elem_size,
+ long long pos, long long size);
+
+ public:
+ IMkvReader* const m_pReader;
+ const long long m_element_start;
+ // const long long m_element_size;
+ const long long m_start; // posn of segment payload
+ const long long m_size; // size of segment payload
+ Cluster m_eos; // TODO: make private?
+
+ static long long CreateInstance(IMkvReader*, long long, Segment*&);
+ ~Segment();
+
+ long Load(); // loads headers and all clusters
+
+ // for incremental loading
+ // long long Unparsed() const;
+ bool DoneParsing() const;
+ long long ParseHeaders(); // stops when first cluster is found
+ // long FindNextCluster(long long& pos, long& size) const;
+ long LoadCluster(long long& pos, long& size); // load one cluster
+ long LoadCluster();
+
+ long ParseNext(const Cluster* pCurr, const Cluster*& pNext, long long& pos,
+ long& size);
+
+ const SeekHead* GetSeekHead() const;
+ const Tracks* GetTracks() const;
+ const SegmentInfo* GetInfo() const;
+ const Cues* GetCues() const;
+ const Chapters* GetChapters() const;
+ const Tags* GetTags() const;
+
+ long long GetDuration() const;
+
+ unsigned long GetCount() const;
+ const Cluster* GetFirst() const;
+ const Cluster* GetLast() const;
+ const Cluster* GetNext(const Cluster*);
+
+ const Cluster* FindCluster(long long time_nanoseconds) const;
+ // const BlockEntry* Seek(long long time_nanoseconds, const Track*) const;
+
+ const Cluster* FindOrPreloadCluster(long long pos);
+
+ long ParseCues(long long cues_off, // offset relative to start of segment
+ long long& parse_pos, long& parse_len);
+
+ private:
+ long long m_pos; // absolute file posn; what has been consumed so far
+ Cluster* m_pUnknownSize;
+
+ SeekHead* m_pSeekHead;
+ SegmentInfo* m_pInfo;
+ Tracks* m_pTracks;
+ Cues* m_pCues;
+ Chapters* m_pChapters;
+ Tags* m_pTags;
+ Cluster** m_clusters;
+ long m_clusterCount; // number of entries for which m_index >= 0
+ long m_clusterPreloadCount; // number of entries for which m_index < 0
+ long m_clusterSize; // array size
+
+ long DoLoadCluster(long long&, long&);
+ long DoLoadClusterUnknownSize(long long&, long&);
+ long DoParseNext(const Cluster*&, long long&, long&);
+
+ bool AppendCluster(Cluster*);
+ bool PreloadCluster(Cluster*, ptrdiff_t);
+
+ // void ParseSeekHead(long long pos, long long size);
+ // void ParseSeekEntry(long long pos, long long size);
+ // void ParseCues(long long);
+
+ const BlockEntry* GetBlock(const CuePoint&, const CuePoint::TrackPosition&);
+};
+
+} // namespace mkvparser
+
+inline long mkvparser::Segment::LoadCluster() {
+ long long pos;
+ long size;
+
+ return LoadCluster(pos, size);
+}
+
+#endif // MKVPARSER_MKVPARSER_H_
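Editorial note — the parser API above follows a load-then-query pattern: construct a reader, let Segment::CreateInstance build the segment, call Load() (or ParseHeaders()/LoadCluster() for incremental use), then query the accessors. A minimal sketch, not part of the patch; it assumes an EBMLHeader class declared earlier in this header whose Parse() leaves `pos` at the start of the Segment element:

#include <cstdio>

#include "mkvparser/mkvparser.h"
#include "mkvparser/mkvreader.h"

// Sketch: open a WebM file, load the segment, and print basic info.
int DumpSegmentInfo(const char* path) {
  mkvparser::MkvReader reader;
  if (reader.Open(path))  // non-zero indicates failure
    return -1;

  long long pos = 0;
  mkvparser::EBMLHeader ebml;  // assumed: declared earlier in mkvparser.h
  if (ebml.Parse(&reader, pos) < 0)  // leaves pos at the Segment element
    return -1;

  mkvparser::Segment* segment = NULL;
  if (mkvparser::Segment::CreateInstance(&reader, pos, segment) != 0)
    return -1;

  if (segment->Load() < 0) {  // loads headers and all clusters
    delete segment;
    return -1;
  }

  const mkvparser::SegmentInfo* info = segment->GetInfo();
  const mkvparser::Tracks* tracks = segment->GetTracks();
  if (info && tracks) {
    const char* muxer = info->GetMuxingAppAsUTF8();
    std::printf("muxer: %s, duration: %lld ns, tracks: %lu, clusters: %lu\n",
                muxer ? muxer : "(unknown)", segment->GetDuration(),
                tracks->GetTracksCount(), segment->GetCount());
  }

  delete segment;
  return 0;
}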
diff --git a/media/libaom/src/third_party/libwebm/mkvparser/mkvreader.cc b/media/libaom/src/third_party/libwebm/mkvparser/mkvreader.cc
new file mode 100644
index 000000000..23d68f508
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvparser/mkvreader.cc
@@ -0,0 +1,133 @@
+// Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#include "mkvparser/mkvreader.h"
+
+#include <sys/types.h>
+
+#include <cassert>
+
+namespace mkvparser {
+
+MkvReader::MkvReader() : m_file(NULL), reader_owns_file_(true) {}
+
+MkvReader::MkvReader(FILE* fp) : m_file(fp), reader_owns_file_(false) {
+ GetFileSize();
+}
+
+MkvReader::~MkvReader() {
+ if (reader_owns_file_)
+ Close();
+ m_file = NULL;
+}
+
+int MkvReader::Open(const char* fileName) {
+ if (fileName == NULL)
+ return -1;
+
+ if (m_file)
+ return -1;
+
+#ifdef _MSC_VER
+ const errno_t e = fopen_s(&m_file, fileName, "rb");
+
+ if (e)
+ return -1; // error
+#else
+ m_file = fopen(fileName, "rb");
+
+ if (m_file == NULL)
+ return -1;
+#endif
+ return !GetFileSize();
+}
+
+bool MkvReader::GetFileSize() {
+ if (m_file == NULL)
+ return false;
+#ifdef _MSC_VER
+ int status = _fseeki64(m_file, 0L, SEEK_END);
+
+ if (status)
+ return false; // error
+
+ m_length = _ftelli64(m_file);
+#else
+ fseek(m_file, 0L, SEEK_END);
+ m_length = ftell(m_file);
+#endif
+ assert(m_length >= 0);
+
+ if (m_length < 0)
+ return false;
+
+#ifdef _MSC_VER
+ status = _fseeki64(m_file, 0L, SEEK_SET);
+
+ if (status)
+ return false; // error
+#else
+ fseek(m_file, 0L, SEEK_SET);
+#endif
+
+ return true;
+}
+
+void MkvReader::Close() {
+ if (m_file != NULL) {
+ fclose(m_file);
+ m_file = NULL;
+ }
+}
+
+int MkvReader::Length(long long* total, long long* available) {
+ if (m_file == NULL)
+ return -1;
+
+ if (total)
+ *total = m_length;
+
+ if (available)
+ *available = m_length;
+
+ return 0;
+}
+
+int MkvReader::Read(long long offset, long len, unsigned char* buffer) {
+ if (m_file == NULL)
+ return -1;
+
+ if (offset < 0)
+ return -1;
+
+ if (len < 0)
+ return -1;
+
+ if (len == 0)
+ return 0;
+
+ if (offset >= m_length)
+ return -1;
+
+#ifdef _MSC_VER
+ const int status = _fseeki64(m_file, offset, SEEK_SET);
+
+ if (status)
+ return -1; // error
+#else
+ fseeko(m_file, static_cast<off_t>(offset), SEEK_SET);
+#endif
+
+ const size_t size = fread(buffer, 1, len, m_file);
+
+ if (size < size_t(len))
+ return -1; // error
+
+ return 0; // success
+}
+
+} // namespace mkvparser
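Editorial note — MkvReader satisfies the IMkvReader contract used throughout the parser: Length() reports total == available because the whole file is on disk, and Read() returns 0 only when the full requested range was read. A small self-contained sketch, not part of the patch (0x1A45DFA3 is the standard EBML header ID):

#include "mkvparser/mkvreader.h"

// Sketch: check a file for the 4-byte EBML magic via the IMkvReader API.
bool LooksLikeEbml(const char* path) {
  mkvparser::MkvReader reader;
  if (reader.Open(path))
    return false;

  long long total = 0, available = 0;
  if (reader.Length(&total, &available) || total < 4)
    return false;  // for MkvReader, total == available (file fully on disk)

  unsigned char magic[4];
  if (reader.Read(0, 4, magic))  // returns 0 on success
    return false;

  return magic[0] == 0x1A && magic[1] == 0x45 &&
         magic[2] == 0xDF && magic[3] == 0xA3;
}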
diff --git a/media/libaom/src/third_party/libwebm/mkvparser/mkvreader.h b/media/libaom/src/third_party/libwebm/mkvparser/mkvreader.h
new file mode 100644
index 000000000..9831ecf64
--- /dev/null
+++ b/media/libaom/src/third_party/libwebm/mkvparser/mkvreader.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2010 The WebM project authors. All Rights Reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree. An additional intellectual property rights grant can be found
+// in the file PATENTS. All contributing project authors may
+// be found in the AUTHORS file in the root of the source tree.
+#ifndef MKVPARSER_MKVREADER_H_
+#define MKVPARSER_MKVREADER_H_
+
+#include <cstdio>
+
+#include "mkvparser/mkvparser.h"
+
+namespace mkvparser {
+
+class MkvReader : public IMkvReader {
+ public:
+ MkvReader();
+ explicit MkvReader(FILE* fp);
+ virtual ~MkvReader();
+
+ int Open(const char*);
+ void Close();
+
+ virtual int Read(long long position, long length, unsigned char* buffer);
+ virtual int Length(long long* total, long long* available);
+
+ private:
+ MkvReader(const MkvReader&);
+ MkvReader& operator=(const MkvReader&);
+
+ // Determines the size of the file. This is called either by the constructor
+ // or by the Open function depending on file ownership. Returns true on
+ // success.
+ bool GetFileSize();
+
+ long long m_length;
+ FILE* m_file;
+ bool reader_owns_file_;
+};
+
+} // namespace mkvparser
+
+#endif // MKVPARSER_MKVREADER_H_
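Editorial note — building on the reader and the Segment accessors, time-based seeking reduces to Segment::FindCluster plus the per-cluster queries. A hedged sketch, assuming a segment created and loaded as in the earlier example:

#include <cstdio>

#include "mkvparser/mkvparser.h"

// Sketch: locate the cluster covering a timestamp and report its time range.
void ReportClusterAt(const mkvparser::Segment* segment, long long time_ns) {
  const mkvparser::Cluster* cluster = segment->FindCluster(time_ns);
  if (cluster == NULL || cluster->EOS())
    return;

  std::printf("cluster %ld: first block at %lld ns, last block at %lld ns\n",
              cluster->GetIndex(), cluster->GetFirstTime(),
              cluster->GetLastTime());
}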
diff --git a/media/libaom/src/third_party/libyuv/README.libaom b/media/libaom/src/third_party/libyuv/README.libaom
new file mode 100644
index 000000000..09693c1f2
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/README.libaom
@@ -0,0 +1,15 @@
+Name: libyuv
+URL: http://code.google.com/p/libyuv/
+Version: 1456
+License: BSD
+License File: LICENSE
+
+Description:
+libyuv is an open source project that includes YUV conversion and scaling
+functionality.
+
+The optimized scaler in libyuv is used in the multiple-resolution encoder
+example, which down-samples the original input video (e.g. 1280x720) a number
+of times in order to encode multiple-resolution bit streams.
+
+Local Modifications:
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/basic_types.h b/media/libaom/src/third_party/libyuv/include/libyuv/basic_types.h
new file mode 100644
index 000000000..66e68536c
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/basic_types.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_BASIC_TYPES_H_ // NOLINT
+#define INCLUDE_LIBYUV_BASIC_TYPES_H_
+
+#include <stddef.h> // for NULL, size_t
+
+#if defined(__ANDROID__) || (defined(_MSC_VER) && (_MSC_VER < 1600))
+#include <sys/types.h> // for uintptr_t on x86
+#else
+#include <stdint.h> // for uintptr_t
+#endif
+
+#ifndef GG_LONGLONG
+#ifndef INT_TYPES_DEFINED
+#define INT_TYPES_DEFINED
+#ifdef COMPILER_MSVC
+typedef unsigned __int64 uint64;
+typedef __int64 int64;
+#ifndef INT64_C
+#define INT64_C(x) x ## I64
+#endif
+#ifndef UINT64_C
+#define UINT64_C(x) x ## UI64
+#endif
+#define INT64_F "I64"
+#else // COMPILER_MSVC
+#if defined(__LP64__) && !defined(__OpenBSD__) && !defined(__APPLE__)
+typedef unsigned long uint64; // NOLINT
+typedef long int64; // NOLINT
+#ifndef INT64_C
+#define INT64_C(x) x ## L
+#endif
+#ifndef UINT64_C
+#define UINT64_C(x) x ## UL
+#endif
+#define INT64_F "l"
+#else // defined(__LP64__) && !defined(__OpenBSD__) && !defined(__APPLE__)
+typedef unsigned long long uint64; // NOLINT
+typedef long long int64; // NOLINT
+#ifndef INT64_C
+#define INT64_C(x) x ## LL
+#endif
+#ifndef UINT64_C
+#define UINT64_C(x) x ## ULL
+#endif
+#define INT64_F "ll"
+#endif // __LP64__
+#endif // COMPILER_MSVC
+typedef unsigned int uint32;
+typedef int int32;
+typedef unsigned short uint16; // NOLINT
+typedef short int16; // NOLINT
+typedef unsigned char uint8;
+typedef signed char int8;
+#endif // INT_TYPES_DEFINED
+#endif // GG_LONGLONG
+
+// Detect whether the compiler targets x86 or x64.
+#if defined(__x86_64__) || defined(_M_X64) || \
+ defined(__i386__) || defined(_M_IX86)
+#define CPU_X86 1
+#endif
+// Detect whether the compiler targets ARM.
+#if defined(__arm__) || defined(_M_ARM)
+#define CPU_ARM 1
+#endif
+
+#ifndef ALIGNP
+#ifdef __cplusplus
+#define ALIGNP(p, t) \
+ (reinterpret_cast<uint8*>(((reinterpret_cast<uintptr_t>(p) + \
+ ((t) - 1)) & ~((t) - 1))))
+#else
+#define ALIGNP(p, t) \
+ ((uint8*)((((uintptr_t)(p) + ((t) - 1)) & ~((t) - 1)))) /* NOLINT */
+#endif
+#endif
+
+#if !defined(LIBYUV_API)
+#if defined(_WIN32) || defined(__CYGWIN__)
+#if defined(LIBYUV_BUILDING_SHARED_LIBRARY)
+#define LIBYUV_API __declspec(dllexport)
+#elif defined(LIBYUV_USING_SHARED_LIBRARY)
+#define LIBYUV_API __declspec(dllimport)
+#else
+#define LIBYUV_API
+#endif // LIBYUV_BUILDING_SHARED_LIBRARY
+#elif defined(__GNUC__) && (__GNUC__ >= 4) && !defined(__APPLE__) && \
+ (defined(LIBYUV_BUILDING_SHARED_LIBRARY) || \
+ defined(LIBYUV_USING_SHARED_LIBRARY))
+#define LIBYUV_API __attribute__ ((visibility ("default")))
+#else
+#define LIBYUV_API
+#endif // __GNUC__
+#endif // LIBYUV_API
+
+#define LIBYUV_BOOL int
+#define LIBYUV_FALSE 0
+#define LIBYUV_TRUE 1
+
+// Visual C x86/ARM targets, or GCC targets known to be little endian.
+#if defined(__x86_64__) || defined(_M_X64) || \
+ defined(__i386__) || defined(_M_IX86) || \
+ defined(__arm__) || defined(_M_ARM) || \
+ (defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
+#define LIBYUV_LITTLE_ENDIAN
+#endif
+
+#endif // INCLUDE_LIBYUV_BASIC_TYPES_H_ NOLINT
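Editorial note — the ALIGNP macro above rounds a pointer up to the next multiple of t, which is how callers hand 16-byte-aligned row bases to the SIMD paths. A small sketch of the usual over-allocate-then-align idiom, not part of the patch:

#include <stdio.h>

#include "libyuv/basic_types.h"

int main(void) {
  enum { kAlign = 16, kWidth = 100 };
  // Over-allocate by kAlign - 1 bytes so an aligned base always fits.
  uint8 raw[kWidth + kAlign - 1];
  uint8* row = ALIGNP(raw, kAlign);
  printf("aligned: %d\n", (int)(((uintptr_t)row % kAlign) == 0));  // prints 1
  return 0;
}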
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/compare.h b/media/libaom/src/third_party/libyuv/include/libyuv/compare.h
new file mode 100644
index 000000000..2a9f1560c
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/compare.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_COMPARE_H_ // NOLINT
+#define INCLUDE_LIBYUV_COMPARE_H_
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Compute a hash for the specified memory. A seed of 5381 is recommended.
+LIBYUV_API
+uint32 HashDjb2(const uint8* src, uint64 count, uint32 seed);
+
+// Scan an opaque ARGB image and return a fourcc based on the alpha offset.
+// Returns FOURCC_ARGB, FOURCC_BGRA, or 0 if unknown.
+LIBYUV_API
+uint32 ARGBDetect(const uint8* argb, int stride_argb, int width, int height);
+
+// Sum Square Error - used to compute Mean Square Error or PSNR.
+LIBYUV_API
+uint64 ComputeSumSquareError(const uint8* src_a,
+ const uint8* src_b, int count);
+
+LIBYUV_API
+uint64 ComputeSumSquareErrorPlane(const uint8* src_a, int stride_a,
+ const uint8* src_b, int stride_b,
+ int width, int height);
+
+static const int kMaxPsnr = 128;
+
+LIBYUV_API
+double SumSquareErrorToPsnr(uint64 sse, uint64 count);
+
+LIBYUV_API
+double CalcFramePsnr(const uint8* src_a, int stride_a,
+ const uint8* src_b, int stride_b,
+ int width, int height);
+
+LIBYUV_API
+double I420Psnr(const uint8* src_y_a, int stride_y_a,
+ const uint8* src_u_a, int stride_u_a,
+ const uint8* src_v_a, int stride_v_a,
+ const uint8* src_y_b, int stride_y_b,
+ const uint8* src_u_b, int stride_u_b,
+ const uint8* src_v_b, int stride_v_b,
+ int width, int height);
+
+LIBYUV_API
+double CalcFrameSsim(const uint8* src_a, int stride_a,
+ const uint8* src_b, int stride_b,
+ int width, int height);
+
+LIBYUV_API
+double I420Ssim(const uint8* src_y_a, int stride_y_a,
+ const uint8* src_u_a, int stride_u_a,
+ const uint8* src_v_a, int stride_v_a,
+ const uint8* src_y_b, int stride_y_b,
+ const uint8* src_u_b, int stride_u_b,
+ const uint8* src_v_b, int stride_v_b,
+ int width, int height);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_COMPARE_H_ NOLINT
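Editorial note — the comparison helpers compose: ComputeSumSquareErrorPlane produces the SSE, and SumSquareErrorToPsnr turns it into decibels (capped at kMaxPsnr for identical inputs). A minimal sketch, not part of the patch:

#include <stdio.h>
#include <string.h>

#include "libyuv/compare.h"

int main(void) {
  enum { kWidth = 64, kHeight = 64 };
  uint8 a[kWidth * kHeight];
  uint8 b[kWidth * kHeight];
  memset(a, 128, sizeof(a));
  memset(b, 130, sizeof(b));  // uniform error of 2 per pixel

  const uint64 sse = libyuv::ComputeSumSquareErrorPlane(
      a, kWidth, b, kWidth, kWidth, kHeight);
  const double psnr =
      libyuv::SumSquareErrorToPsnr(sse, (uint64)kWidth * kHeight);
  printf("sse=%llu psnr=%.2f dB\n", (unsigned long long)sse, psnr);
  return 0;
}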
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/convert.h b/media/libaom/src/third_party/libyuv/include/libyuv/convert.h
new file mode 100644
index 000000000..d6f206c10
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/convert.h
@@ -0,0 +1,246 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_CONVERT_H_ // NOLINT
+#define INCLUDE_LIBYUV_CONVERT_H_
+
+#include "libyuv/basic_types.h"
+// TODO(fbarchard): Remove the following header includes.
+#include "libyuv/convert_from.h"
+#include "libyuv/planar_functions.h"
+#include "libyuv/rotate.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Convert I444 to I420.
+LIBYUV_API
+int I444ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert I422 to I420.
+LIBYUV_API
+int I422ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert I411 to I420.
+LIBYUV_API
+int I411ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Copy I420 to I420.
+#define I420ToI420 I420Copy
+LIBYUV_API
+int I420Copy(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert I400 (grey) to I420.
+LIBYUV_API
+int I400ToI420(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+#define J400ToJ420 I400ToI420
+
+// Convert NV12 to I420.
+LIBYUV_API
+int NV12ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert NV21 to I420.
+LIBYUV_API
+int NV21ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_vu, int src_stride_vu,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert YUY2 to I420.
+LIBYUV_API
+int YUY2ToI420(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert UYVY to I420.
+LIBYUV_API
+int UYVYToI420(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert M420 to I420.
+LIBYUV_API
+int M420ToI420(const uint8* src_m420, int src_stride_m420,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// ARGB little endian (bgra in memory) to I420.
+LIBYUV_API
+int ARGBToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// BGRA little endian (argb in memory) to I420.
+LIBYUV_API
+int BGRAToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// ABGR little endian (rgba in memory) to I420.
+LIBYUV_API
+int ABGRToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// RGBA little endian (abgr in memory) to I420.
+LIBYUV_API
+int RGBAToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// RGB little endian (bgr in memory) to I420.
+LIBYUV_API
+int RGB24ToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// RGB big endian (rgb in memory) to I420.
+LIBYUV_API
+int RAWToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// RGB16 (RGBP fourcc) little endian to I420.
+LIBYUV_API
+int RGB565ToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// RGB15 (RGBO fourcc) little endian to I420.
+LIBYUV_API
+int ARGB1555ToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// RGB12 (R444 fourcc) little endian to I420.
+LIBYUV_API
+int ARGB4444ToI420(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+#ifdef HAVE_JPEG
+// src_width/height provided by capture.
+// dst_width/height for clipping determine final size.
+LIBYUV_API
+int MJPGToI420(const uint8* sample, size_t sample_size,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int src_width, int src_height,
+ int dst_width, int dst_height);
+
+// Query size of MJPG in pixels.
+LIBYUV_API
+int MJPGSize(const uint8* sample, size_t sample_size,
+ int* width, int* height);
+#endif
+
+// Convert camera sample to I420 with cropping, rotation and vertical flip.
+// "src_size" is needed to parse MJPG.
+// "dst_stride_y" number of bytes in a row of the dst_y plane.
+// Normally this would be the same as dst_width, with recommended alignment
+// to 16 bytes for better efficiency.
+// If rotation of 90 or 270 is used, stride is affected. The caller should
+// allocate the I420 buffer according to rotation.
+// "dst_stride_u" number of bytes in a row of the dst_u plane.
+// Normally this would be the same as (dst_width + 1) / 2, with
+// recommended alignment to 16 bytes for better efficiency.
+// If rotation of 90 or 270 is used, stride is affected.
+// "crop_x" and "crop_y" are starting position for cropping.
+// To center, crop_x = (src_width - dst_width) / 2
+// crop_y = (src_height - dst_height) / 2
+// "src_width" / "src_height" is size of src_frame in pixels.
+// "src_height" can be negative indicating a vertically flipped image source.
+// "crop_width" / "crop_height" is the size to crop the src to.
+// Must be less than or equal to src_width/src_height
+// Cropping parameters are pre-rotation.
+// "rotation" can be 0, 90, 180 or 270.
+// "format" is a fourcc. ie 'I420', 'YUY2'
+// Returns 0 for successful; -1 for invalid parameter. Non-zero for failure.
+LIBYUV_API
+int ConvertToI420(const uint8* src_frame, size_t src_size,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int crop_x, int crop_y,
+ int src_width, int src_height,
+ int crop_width, int crop_height,
+ enum RotationMode rotation,
+ uint32 format);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_CONVERT_H_ NOLINT
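Editorial note — as documented above, ConvertToI420 takes pre-rotation crop coordinates and a fourcc naming the source format. A hedged wrapper sketch for a centered crop of a YUY2 frame: kRotate0 is assumed to be RotationMode's identity value (it comes from rotate.h, not shown here), and the fourcc is built by hand from its four bytes:

#include "libyuv/convert.h"

static const uint32 kFourccYUY2 = (uint32)'Y' | ((uint32)'U' << 8) |
                                  ((uint32)'Y' << 16) | ((uint32)'2' << 24);

// Sketch: centered crop + conversion of a YUY2 camera frame to I420.
int Yuy2ToI420Centered(const uint8* src, size_t src_size,
                       int src_width, int src_height,
                       uint8* dst_y, uint8* dst_u, uint8* dst_v,
                       int dst_width, int dst_height) {
  const int crop_x = (src_width - dst_width) / 2;    // center, per the
  const int crop_y = (src_height - dst_height) / 2;  // comment block above
  return libyuv::ConvertToI420(src, src_size,
                               dst_y, dst_width,            // dst_stride_y
                               dst_u, (dst_width + 1) / 2,  // dst_stride_u
                               dst_v, (dst_width + 1) / 2,  // dst_stride_v
                               crop_x, crop_y,
                               src_width, src_height,
                               dst_width, dst_height,  // crop_width/height
                               libyuv::kRotate0, kFourccYUY2);
}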
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/convert_argb.h b/media/libaom/src/third_party/libyuv/include/libyuv/convert_argb.h
new file mode 100644
index 000000000..ea75c0b26
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/convert_argb.h
@@ -0,0 +1,232 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_CONVERT_ARGB_H_ // NOLINT
+#define INCLUDE_LIBYUV_CONVERT_ARGB_H_
+
+#include "libyuv/basic_types.h"
+// TODO(fbarchard): Remove the following header includes
+#include "libyuv/convert_from.h"
+#include "libyuv/planar_functions.h"
+#include "libyuv/rotate.h"
+
+// TODO(fbarchard): This set of functions should exactly match convert.h
+// TODO(fbarchard): Add tests. Create random content of the right size, convert
+// with C vs optimized paths and/or to I420, and compare results.
+// TODO(fbarchard): Some of these functions lack parameter setting.
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Alias.
+#define ARGBToARGB ARGBCopy
+
+// Copy ARGB to ARGB.
+LIBYUV_API
+int ARGBCopy(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert I420 to ARGB.
+LIBYUV_API
+int I420ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert I422 to ARGB.
+LIBYUV_API
+int I422ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert I444 to ARGB.
+LIBYUV_API
+int I444ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert I411 to ARGB.
+LIBYUV_API
+int I411ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert I400 (grey) to ARGB. Reverse of ARGBToI400.
+LIBYUV_API
+int I400ToARGB(const uint8* src_y, int src_stride_y,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert J400 (jpeg grey) to ARGB.
+LIBYUV_API
+int J400ToARGB(const uint8* src_y, int src_stride_y,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Alias.
+#define YToARGB I400ToARGB
+
+// Convert NV12 to ARGB.
+LIBYUV_API
+int NV12ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert NV21 to ARGB.
+LIBYUV_API
+int NV21ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_vu, int src_stride_vu,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert M420 to ARGB.
+LIBYUV_API
+int M420ToARGB(const uint8* src_m420, int src_stride_m420,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert YUY2 to ARGB.
+LIBYUV_API
+int YUY2ToARGB(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert UYVY to ARGB.
+LIBYUV_API
+int UYVYToARGB(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert J420 to ARGB.
+LIBYUV_API
+int J420ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert J422 to ARGB.
+LIBYUV_API
+int J422ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// BGRA little endian (argb in memory) to ARGB.
+LIBYUV_API
+int BGRAToARGB(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// ABGR little endian (rgba in memory) to ARGB.
+LIBYUV_API
+int ABGRToARGB(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// RGBA little endian (abgr in memory) to ARGB.
+LIBYUV_API
+int RGBAToARGB(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Deprecated function name.
+#define BG24ToARGB RGB24ToARGB
+
+// RGB little endian (bgr in memory) to ARGB.
+LIBYUV_API
+int RGB24ToARGB(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// RGB big endian (rgb in memory) to ARGB.
+LIBYUV_API
+int RAWToARGB(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// RGB16 (RGBP fourcc) little endian to ARGB.
+LIBYUV_API
+int RGB565ToARGB(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// RGB15 (RGBO fourcc) little endian to ARGB.
+LIBYUV_API
+int ARGB1555ToARGB(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// RGB12 (R444 fourcc) little endian to ARGB.
+LIBYUV_API
+int ARGB4444ToARGB(const uint8* src_frame, int src_stride_frame,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+#ifdef HAVE_JPEG
+// src_width/height provided by capture
+// dst_width/height for clipping determine final size.
+LIBYUV_API
+int MJPGToARGB(const uint8* sample, size_t sample_size,
+ uint8* dst_argb, int dst_stride_argb,
+ int src_width, int src_height,
+ int dst_width, int dst_height);
+#endif
+
+// Convert camera sample to ARGB with cropping, rotation and vertical flip.
+// "src_size" is needed to parse MJPG.
+// "dst_stride_argb" number of bytes in a row of the dst_argb plane.
+// Normally this would be the same as dst_width, with recommended alignment
+// to 16 bytes for better efficiency.
+// If rotation of 90 or 270 is used, stride is affected. The caller should
+// allocate the I420 buffer according to rotation.
+// "dst_stride_u" number of bytes in a row of the dst_u plane.
+// Normally this would be the same as (dst_width + 1) / 2, with
+// recommended alignment to 16 bytes for better efficiency.
+// If rotation of 90 or 270 is used, stride is affected.
+// "crop_x" and "crop_y" are starting position for cropping.
+// To center, crop_x = (src_width - dst_width) / 2
+// crop_y = (src_height - dst_height) / 2
+// "src_width" / "src_height" is size of src_frame in pixels.
+// "src_height" can be negative indicating a vertically flipped image source.
+// "crop_width" / "crop_height" is the size to crop the src to.
+// Must be less than or equal to src_width/src_height
+// Cropping parameters are pre-rotation.
+// "rotation" can be 0, 90, 180 or 270.
+// "format" is a fourcc. ie 'I420', 'YUY2'
+// Returns 0 for successful; -1 for invalid parameter. Non-zero for failure.
+LIBYUV_API
+int ConvertToARGB(const uint8* src_frame, size_t src_size,
+ uint8* dst_argb, int dst_stride_argb,
+ int crop_x, int crop_y,
+ int src_width, int src_height,
+ int crop_width, int crop_height,
+ enum RotationMode rotation,
+ uint32 format);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_CONVERT_ARGB_H_ NOLINT
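Editorial note — a short sketch of the common display path using I420ToARGB declared above, not part of the patch; ARGB packs 4 bytes per pixel, so the destination stride is width * 4:

#include "libyuv/convert_argb.h"

// Sketch: expand decoded I420 planes into a packed ARGB frame for display.
int I420ToArgbFrame(const uint8* y, int y_stride,
                    const uint8* u, int u_stride,
                    const uint8* v, int v_stride,
                    uint8* argb, int width, int height) {
  return libyuv::I420ToARGB(y, y_stride, u, u_stride, v, v_stride,
                            argb, width * 4,  // 4 bytes per ARGB pixel
                            width, height);
}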
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/convert_from.h b/media/libaom/src/third_party/libyuv/include/libyuv/convert_from.h
new file mode 100644
index 000000000..3591b4fd6
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/convert_from.h
@@ -0,0 +1,182 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_CONVERT_FROM_H_ // NOLINT
+#define INCLUDE_LIBYUV_CONVERT_FROM_H_
+
+#include "libyuv/basic_types.h"
+#include "libyuv/rotate.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// See also convert.h for conversions from other formats to I420.
+
+// I420ToI420 (in convert.h) is an alias for I420Copy.
+
+LIBYUV_API
+int I420ToI422(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+LIBYUV_API
+int I420ToI444(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+LIBYUV_API
+int I420ToI411(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Copy to I400. Source can be I420, I422, I444, I400, NV12 or NV21.
+LIBYUV_API
+int I400Copy(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height);
+
+// TODO(fbarchard): I420ToM420
+
+LIBYUV_API
+int I420ToNV12(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height);
+
+LIBYUV_API
+int I420ToNV21(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_vu, int dst_stride_vu,
+ int width, int height);
+
+LIBYUV_API
+int I420ToYUY2(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+LIBYUV_API
+int I420ToUYVY(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+LIBYUV_API
+int I420ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+LIBYUV_API
+int I420ToBGRA(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+LIBYUV_API
+int I420ToABGR(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+LIBYUV_API
+int I420ToRGBA(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_rgba, int dst_stride_rgba,
+ int width, int height);
+
+LIBYUV_API
+int I420ToRGB24(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+LIBYUV_API
+int I420ToRAW(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+LIBYUV_API
+int I420ToRGB565(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+// Convert I420 to RGB565 with a 4x4 dither matrix (16 bytes).
+// Values in the dither matrix from 0 to 7 are recommended.
+// The matrix is row-major: the first byte is the upper-left entry.
+
+LIBYUV_API
+int I420ToRGB565Dither(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ const uint8* dither4x4, int width, int height);
+
+LIBYUV_API
+int I420ToARGB1555(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+LIBYUV_API
+int I420ToARGB4444(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+// Convert I420 to specified format.
+// "dst_sample_stride" is bytes in a row for the destination. Pass 0 if the
+// buffer has contiguous rows. Can be negative. A multiple of 16 is optimal.
+LIBYUV_API
+int ConvertFromI420(const uint8* y, int y_stride,
+ const uint8* u, int u_stride,
+ const uint8* v, int v_stride,
+ uint8* dst_sample, int dst_sample_stride,
+ int width, int height,
+ uint32 format);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_CONVERT_FROM_H_ NOLINT
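Editorial note — the dithered RGB565 path above wants a row-major 4x4 matrix with entries in 0..7. A sketch using a Bayer ordered-dither pattern scaled into that range; the particular matrix is an illustrative choice, not mandated by the header:

#include "libyuv/convert_from.h"

static const uint8 kDither4x4[16] = {
    0, 4, 1, 5,  // row-major: first byte is the upper-left entry
    6, 2, 7, 3,
    1, 5, 0, 4,
    7, 3, 6, 2,
};

// Sketch: I420 -> RGB565 with ordered dithering to mask banding.
int I420ToRgb565Dithered(const uint8* y, int y_stride,
                         const uint8* u, int u_stride,
                         const uint8* v, int v_stride,
                         uint8* dst, int width, int height) {
  return libyuv::I420ToRGB565Dither(y, y_stride, u, u_stride, v, v_stride,
                                    dst, width * 2,  // 2 bytes per pixel
                                    kDither4x4, width, height);
}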
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/convert_from_argb.h b/media/libaom/src/third_party/libyuv/include/libyuv/convert_from_argb.h
new file mode 100644
index 000000000..4a6226813
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/convert_from_argb.h
@@ -0,0 +1,191 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_ // NOLINT
+#define INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Copy ARGB to ARGB.
+#define ARGBToARGB ARGBCopy
+LIBYUV_API
+int ARGBCopy(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert ARGB To BGRA.
+LIBYUV_API
+int ARGBToBGRA(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_bgra, int dst_stride_bgra,
+ int width, int height);
+
+// Convert ARGB To ABGR.
+LIBYUV_API
+int ARGBToABGR(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_abgr, int dst_stride_abgr,
+ int width, int height);
+
+// Convert ARGB To RGBA.
+LIBYUV_API
+int ARGBToRGBA(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgba, int dst_stride_rgba,
+ int width, int height);
+
+// Convert ARGB To RGB24.
+LIBYUV_API
+int ARGBToRGB24(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgb24, int dst_stride_rgb24,
+ int width, int height);
+
+// Convert ARGB To RAW.
+LIBYUV_API
+int ARGBToRAW(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgb, int dst_stride_rgb,
+ int width, int height);
+
+// Convert ARGB To RGB565.
+LIBYUV_API
+int ARGBToRGB565(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ int width, int height);
+
+// Convert ARGB to RGB565 with a 4x4 dither matrix (16 bytes).
+// Values in the dither matrix from 0 to 7 are recommended.
+// The matrix is row-major: the first byte is the upper-left entry.
+// TODO(fbarchard): Consider pointer to 2d array for dither4x4.
+// const uint8(*dither)[4][4];
+LIBYUV_API
+int ARGBToRGB565Dither(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ const uint8* dither4x4, int width, int height);
+
+// Convert ARGB To ARGB1555.
+LIBYUV_API
+int ARGBToARGB1555(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb1555, int dst_stride_argb1555,
+ int width, int height);
+
+// Convert ARGB To ARGB4444.
+LIBYUV_API
+int ARGBToARGB4444(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb4444, int dst_stride_argb4444,
+ int width, int height);
+
+// Convert ARGB To I444.
+LIBYUV_API
+int ARGBToI444(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert ARGB To I422.
+LIBYUV_API
+int ARGBToI422(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert ARGB To I420. (also in convert.h)
+LIBYUV_API
+int ARGBToI420(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert ARGB to J420 (JPEG full-range I420).
+LIBYUV_API
+int ARGBToJ420(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_yj, int dst_stride_yj,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert ARGB to J422.
+LIBYUV_API
+int ARGBToJ422(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_yj, int dst_stride_yj,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert ARGB To I411.
+LIBYUV_API
+int ARGBToI411(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert ARGB to J400 (JPEG full range).
+LIBYUV_API
+int ARGBToJ400(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_yj, int dst_stride_yj,
+ int width, int height);
+
+// Convert ARGB to I400.
+LIBYUV_API
+int ARGBToI400(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height);
+
+// Convert ARGB to G. (Reverse of J400ToARGB, which replicates G back to ARGB.)
+LIBYUV_API
+int ARGBToG(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_g, int dst_stride_g,
+ int width, int height);
+
+// Convert ARGB To NV12.
+LIBYUV_API
+int ARGBToNV12(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height);
+
+// Convert ARGB To NV21.
+LIBYUV_API
+int ARGBToNV21(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_vu, int dst_stride_vu,
+ int width, int height);
+
+// Convert ARGB To YUY2.
+LIBYUV_API
+int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_yuy2, int dst_stride_yuy2,
+ int width, int height);
+
+// Convert ARGB To UYVY.
+LIBYUV_API
+int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_uyvy, int dst_stride_uyvy,
+ int width, int height);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_CONVERT_FROM_ARGB_H_ NOLINT
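Editorial note — a sketch of the typical encoder hand-off using ARGBToNV12 declared above, not part of the patch: NV12 keeps a full-resolution Y plane plus one half-height plane of interleaved UV pairs.

#include "libyuv/convert_from_argb.h"

// Sketch: pack an ARGB frame into NV12 (Y plane + interleaved UV plane).
int ArgbToNv12(const uint8* argb, int width, int height,
               uint8* dst_y, uint8* dst_uv) {
  // (width + 1) / 2 UV pairs per row, 2 bytes each (rounded for odd widths).
  const int uv_stride = ((width + 1) / 2) * 2;
  return libyuv::ARGBToNV12(argb, width * 4,  // 4 bytes per ARGB pixel
                            dst_y, width,
                            dst_uv, uv_stride,
                            width, height);
}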
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/cpu_id.h b/media/libaom/src/third_party/libyuv/include/libyuv/cpu_id.h
new file mode 100644
index 000000000..870e94e8c
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/cpu_id.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_CPU_ID_H_ // NOLINT
+#define INCLUDE_LIBYUV_CPU_ID_H_
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// TODO(fbarchard): Consider overlapping bits for different architectures.
+// Internal flag to indicate cpuid requires initialization.
+#define kCpuInit 0x1
+
+// These flags are only valid on ARM processors.
+static const int kCpuHasARM = 0x2;
+static const int kCpuHasNEON = 0x4;
+// 0x8 reserved for future ARM flag.
+
+// These flags are only valid on x86 processors.
+static const int kCpuHasX86 = 0x10;
+static const int kCpuHasSSE2 = 0x20;
+static const int kCpuHasSSSE3 = 0x40;
+static const int kCpuHasSSE41 = 0x80;
+static const int kCpuHasSSE42 = 0x100;
+static const int kCpuHasAVX = 0x200;
+static const int kCpuHasAVX2 = 0x400;
+static const int kCpuHasERMS = 0x800;
+static const int kCpuHasFMA3 = 0x1000;
+// 0x2000, 0x4000, 0x8000 reserved for future X86 flags.
+
+// These flags are only valid on MIPS processors.
+static const int kCpuHasMIPS = 0x10000;
+static const int kCpuHasMIPS_DSP = 0x20000;
+static const int kCpuHasMIPS_DSPR2 = 0x40000;
+
+// Internal function used to auto-init.
+LIBYUV_API
+int InitCpuFlags(void);
+
+// Internal function for parsing /proc/cpuinfo.
+LIBYUV_API
+int ArmCpuCaps(const char* cpuinfo_name);
+
+// Detect whether the CPU has SSE2, etc.
+// The test_flag parameter should be one of the kCpuHas constants above.
+// Returns non-zero if the instruction set is detected.
+static __inline int TestCpuFlag(int test_flag) {
+ LIBYUV_API extern int cpu_info_;
+ return (cpu_info_ == kCpuInit ? InitCpuFlags() : cpu_info_) & test_flag;
+}
+
+// For testing, allow CPU flags to be disabled.
+// e.g. MaskCpuFlags(~kCpuHasSSSE3) to disable SSSE3.
+// MaskCpuFlags(-1) to enable all cpu specific optimizations.
+// MaskCpuFlags(0) to disable all cpu specific optimizations.
+LIBYUV_API
+void MaskCpuFlags(int enable_flags);
+
+// Low-level cpuid for x86. Returns zeros on other CPUs.
+// eax is the info type that you want.
+// ecx is typically the cpu number, and should normally be zero.
+LIBYUV_API
+void CpuId(uint32 eax, uint32 ecx, uint32* cpu_info);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_CPU_ID_H_ NOLINT
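Editorial note — TestCpuFlag and MaskCpuFlags combine into a simple A/B-testing workflow: check a flag, mask it off to force the portable code path, then restore. A minimal sketch, not part of the patch:

#include <stdio.h>

#include "libyuv/cpu_id.h"

int main(void) {
  if (libyuv::TestCpuFlag(libyuv::kCpuHasSSSE3))
    printf("SSSE3 path available\n");

  // Force the non-SSSE3 code paths, e.g. to benchmark the C fallback.
  libyuv::MaskCpuFlags(~libyuv::kCpuHasSSSE3);
  printf("SSSE3 after mask: %d\n",
         libyuv::TestCpuFlag(libyuv::kCpuHasSSSE3) != 0);

  libyuv::MaskCpuFlags(-1);  // re-enable all detected optimizations
  return 0;
}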
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/mjpeg_decoder.h b/media/libaom/src/third_party/libyuv/include/libyuv/mjpeg_decoder.h
new file mode 100644
index 000000000..fa1e51f9a
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/mjpeg_decoder.h
@@ -0,0 +1,193 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_MJPEG_DECODER_H_ // NOLINT
+#define INCLUDE_LIBYUV_MJPEG_DECODER_H_
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+// NOTE: For a simplified public API use convert.h MJPGToI420().
+
+struct jpeg_common_struct;
+struct jpeg_decompress_struct;
+struct jpeg_source_mgr;
+
+namespace libyuv {
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+LIBYUV_BOOL ValidateJpeg(const uint8* sample, size_t sample_size);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+static const uint32 kUnknownDataSize = 0xFFFFFFFF;
+
+enum JpegSubsamplingType {
+ kJpegYuv420,
+ kJpegYuv422,
+ kJpegYuv411,
+ kJpegYuv444,
+ kJpegYuv400,
+ kJpegUnknown
+};
+
+struct Buffer {
+ const uint8* data;
+ int len;
+};
+
+struct BufferVector {
+ Buffer* buffers;
+ int len;
+ int pos;
+};
+
+struct SetJmpErrorMgr;
+
+// MJPEG ("Motion JPEG") is a pseudo-standard video codec where the frames are
+// simply independent JPEG images with a fixed Huffman table (which is omitted).
+// It is rarely used in video transmission, but is common as a camera capture
+// format, especially in Logitech devices. This class implements a decoder for
+// MJPEG frames.
+//
+// See http://tools.ietf.org/html/rfc2435
+class LIBYUV_API MJpegDecoder {
+ public:
+ typedef void (*CallbackFunction)(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows);
+
+ static const int kColorSpaceUnknown;
+ static const int kColorSpaceGrayscale;
+ static const int kColorSpaceRgb;
+ static const int kColorSpaceYCbCr;
+ static const int kColorSpaceCMYK;
+ static const int kColorSpaceYCCK;
+
+ MJpegDecoder();
+ ~MJpegDecoder();
+
+ // Loads a new frame, reads its headers, and determines the uncompressed
+ // image format.
+  // Returns LIBYUV_TRUE if the image looks valid and the format is supported.
+ // If return value is LIBYUV_TRUE, then the values for all the following
+ // getters are populated.
+ // src_len is the size of the compressed mjpeg frame in bytes.
+ LIBYUV_BOOL LoadFrame(const uint8* src, size_t src_len);
+
+ // Returns width of the last loaded frame in pixels.
+ int GetWidth();
+
+ // Returns height of the last loaded frame in pixels.
+ int GetHeight();
+
+ // Returns format of the last loaded frame. The return value is one of the
+ // kColorSpace* constants.
+ int GetColorSpace();
+
+ // Number of color components in the color space.
+ int GetNumComponents();
+
+ // Sample factors of the n-th component.
+ int GetHorizSampFactor(int component);
+
+ int GetVertSampFactor(int component);
+
+ int GetHorizSubSampFactor(int component);
+
+ int GetVertSubSampFactor(int component);
+
+ // Public for testability.
+ int GetImageScanlinesPerImcuRow();
+
+ // Public for testability.
+ int GetComponentScanlinesPerImcuRow(int component);
+
+ // Width of a component in bytes.
+ int GetComponentWidth(int component);
+
+ // Height of a component.
+ int GetComponentHeight(int component);
+
+ // Width of a component in bytes with padding for DCTSIZE. Public for testing.
+ int GetComponentStride(int component);
+
+ // Size of a component in bytes.
+ int GetComponentSize(int component);
+
+ // Call this after LoadFrame() if you decide you don't want to decode it
+ // after all.
+ LIBYUV_BOOL UnloadFrame();
+
+ // Decodes the entire image into a one-buffer-per-color-component format.
+  // dst_width must match exactly. dst_height must be <= the image height; if
+ // less, the image is cropped. "planes" must have size equal to at least
+ // GetNumComponents() and they must point to non-overlapping buffers of size
+ // at least GetComponentSize(i). The pointers in planes are incremented
+ // to point to after the end of the written data.
+ // TODO(fbarchard): Add dst_x, dst_y to allow specific rect to be decoded.
+ LIBYUV_BOOL DecodeToBuffers(uint8** planes, int dst_width, int dst_height);
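+
+  // Example (a minimal decode sketch; y_buf/u_buf/v_buf are hypothetical
+  // caller-allocated buffers of at least GetComponentSize(i) bytes):
+  //   MJpegDecoder decoder;
+  //   if (decoder.LoadFrame(src, src_len)) {
+  //     uint8* planes[3] = { y_buf, u_buf, v_buf };
+  //     decoder.DecodeToBuffers(planes, decoder.GetWidth(),
+  //                             decoder.GetHeight());
+  //   }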
+
+ // Decodes the entire image and passes the data via repeated calls to a
+ // callback function. Each call will get the data for a whole number of
+ // image scanlines.
+ // TODO(fbarchard): Add dst_x, dst_y to allow specific rect to be decoded.
+ LIBYUV_BOOL DecodeToCallback(CallbackFunction fn, void* opaque,
+ int dst_width, int dst_height);
+
+  // A helper function that recognizes the JPEG sub-sampling type.
+ static JpegSubsamplingType JpegSubsamplingTypeHelper(
+ int* subsample_x, int* subsample_y, int number_of_components);
+
+ private:
+ void AllocOutputBuffers(int num_outbufs);
+ void DestroyOutputBuffers();
+
+ LIBYUV_BOOL StartDecode();
+ LIBYUV_BOOL FinishDecode();
+
+ void SetScanlinePointers(uint8** data);
+ LIBYUV_BOOL DecodeImcuRow();
+
+ int GetComponentScanlinePadding(int component);
+
+ // A buffer holding the input data for a frame.
+ Buffer buf_;
+ BufferVector buf_vec_;
+
+ jpeg_decompress_struct* decompress_struct_;
+ jpeg_source_mgr* source_mgr_;
+ SetJmpErrorMgr* error_mgr_;
+
+ // LIBYUV_TRUE iff at least one component has scanline padding. (i.e.,
+ // GetComponentScanlinePadding() != 0.)
+ LIBYUV_BOOL has_scanline_padding_;
+
+ // Temporaries used to point to scanline outputs.
+ int num_outbufs_; // Outermost size of all arrays below.
+ uint8*** scanlines_;
+ int* scanlines_sizes_;
+ // Temporary buffer used for decoding when we can't decode directly to the
+ // output buffers. Large enough for just one iMCU row.
+ uint8** databuf_;
+ int* databuf_strides_;
+};
+
+} // namespace libyuv
+
+#endif // __cplusplus
+#endif // INCLUDE_LIBYUV_MJPEG_DECODER_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/planar_functions.h b/media/libaom/src/third_party/libyuv/include/libyuv/planar_functions.h
new file mode 100644
index 000000000..7fe4d8eed
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/planar_functions.h
@@ -0,0 +1,454 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_PLANAR_FUNCTIONS_H_ // NOLINT
+#define INCLUDE_LIBYUV_PLANAR_FUNCTIONS_H_
+
+#include "libyuv/basic_types.h"
+
+// TODO(fbarchard): Remove the following headers includes.
+#include "libyuv/convert.h"
+#include "libyuv/convert_argb.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Copy a plane of data.
+LIBYUV_API
+void CopyPlane(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height);
+
+LIBYUV_API
+void CopyPlane_16(const uint16* src_y, int src_stride_y,
+ uint16* dst_y, int dst_stride_y,
+ int width, int height);
+
+// Set a plane of data to a 32 bit value.
+LIBYUV_API
+void SetPlane(uint8* dst_y, int dst_stride_y,
+ int width, int height,
+ uint32 value);
+
+// Copy I400. Supports inverting.
+LIBYUV_API
+int I400ToI400(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height);
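+
+// Example (a hedged sketch): following the libyuv convention, passing a
+// negative height inverts the image vertically during the copy.
+//   I400ToI400(src_y, src_stride_y, dst_y, dst_stride_y, width, -height);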
+
+#define J400ToJ400 I400ToI400
+
+// Copy I422 to I422.
+#define I422ToI422 I422Copy
+LIBYUV_API
+int I422Copy(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Copy I444 to I444.
+#define I444ToI444 I444Copy
+LIBYUV_API
+int I444Copy(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert YUY2 to I422.
+LIBYUV_API
+int YUY2ToI422(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Convert UYVY to I422.
+LIBYUV_API
+int UYVYToI422(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+LIBYUV_API
+int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height);
+
+LIBYUV_API
+int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height);
+
+// Convert I420 to I400. (calls CopyPlane ignoring u/v).
+LIBYUV_API
+int I420ToI400(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height);
+
+// Alias
+#define J420ToJ400 I420ToI400
+#define I420ToI420Mirror I420Mirror
+
+// I420 mirror.
+LIBYUV_API
+int I420Mirror(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height);
+
+// Alias
+#define I400ToI400Mirror I400Mirror
+
+// I400 mirror. A single plane is mirrored horizontally.
+// Pass negative height to achieve 180 degree rotation.
+LIBYUV_API
+int I400Mirror(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height);
+
+// Alias
+#define ARGBToARGBMirror ARGBMirror
+
+// ARGB mirror.
+LIBYUV_API
+int ARGBMirror(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert NV12 to RGB565.
+LIBYUV_API
+int NV12ToRGB565(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ int width, int height);
+
+// Convert NV21 to RGB565.
+LIBYUV_API
+int NV21ToRGB565(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ int width, int height);
+
+// I422ToARGB is in convert_argb.h
+// Convert I422 to BGRA.
+LIBYUV_API
+int I422ToBGRA(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_bgra, int dst_stride_bgra,
+ int width, int height);
+
+// Convert I422 to ABGR.
+LIBYUV_API
+int I422ToABGR(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_abgr, int dst_stride_abgr,
+ int width, int height);
+
+// Convert I422 to RGBA.
+LIBYUV_API
+int I422ToRGBA(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_rgba, int dst_stride_rgba,
+ int width, int height);
+
+// Draw a rectangle into I420.
+LIBYUV_API
+int I420Rect(uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int x, int y, int width, int height,
+ int value_y, int value_u, int value_v);
+
+// Draw a rectangle into ARGB.
+LIBYUV_API
+int ARGBRect(uint8* dst_argb, int dst_stride_argb,
+ int x, int y, int width, int height, uint32 value);
+
+// Convert ARGB to gray scale ARGB.
+LIBYUV_API
+int ARGBGrayTo(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Make a rectangle of ARGB gray scale.
+LIBYUV_API
+int ARGBGray(uint8* dst_argb, int dst_stride_argb,
+ int x, int y, int width, int height);
+
+// Make a rectangle of ARGB Sepia tone.
+LIBYUV_API
+int ARGBSepia(uint8* dst_argb, int dst_stride_argb,
+ int x, int y, int width, int height);
+
+// Apply a color matrix to each ARGB pixel.
+// matrix_argb is 4 rows of 4 signed coefficients. -128 to 127 represent
+// -2 to 2, so 64 represents 1.0.
+// The first 4 coefficients apply to B, G, R, A and produce B of the output.
+// The next 4 coefficients apply to B, G, R, A and produce G of the output.
+// The next 4 coefficients apply to B, G, R, A and produce R of the output.
+// The last 4 coefficients apply to B, G, R, A and produce A of the output.
+LIBYUV_API
+int ARGBColorMatrix(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ const int8* matrix_argb,
+ int width, int height);
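+
+// Example (a hedged sketch): the identity matrix, using 64 to represent 1.0:
+//   static const int8 kIdentityMatrix[16] = {
+//     64, 0, 0, 0,  // B_out = B
+//     0, 64, 0, 0,  // G_out = G
+//     0, 0, 64, 0,  // R_out = R
+//     0, 0, 0, 64,  // A_out = A
+//   };
+//   ARGBColorMatrix(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
+//                   kIdentityMatrix, width, height);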
+
+// Deprecated. Use ARGBColorMatrix instead.
+// Apply a color matrix to each ARGB pixel.
+// matrix_rgb is 3 rows of 4 signed coefficients. -128 to 127 represent
+// -1 to 1.
+// The first 4 coefficients apply to B, G, R, A and produce B of the output.
+// The next 4 coefficients apply to B, G, R, A and produce G of the output.
+// The last 4 coefficients apply to B, G, R, A and produce R of the output.
+LIBYUV_API
+int RGBColorMatrix(uint8* dst_argb, int dst_stride_argb,
+ const int8* matrix_rgb,
+ int x, int y, int width, int height);
+
+// Apply a color table to each ARGB pixel.
+// Table contains 256 ARGB values.
+LIBYUV_API
+int ARGBColorTable(uint8* dst_argb, int dst_stride_argb,
+ const uint8* table_argb,
+ int x, int y, int width, int height);
+
+// Apply a color table to each ARGB pixel but preserve destination alpha.
+// Table contains 256 ARGB values.
+LIBYUV_API
+int RGBColorTable(uint8* dst_argb, int dst_stride_argb,
+ const uint8* table_argb,
+ int x, int y, int width, int height);
+
+// Apply a luma/color table to each ARGB pixel but preserve destination alpha.
+// Table contains 32768 values indexed by [Y][C] where Y is a 7 bit luma from
+// RGB (YJ style) and C is an 8 bit color component (R, G or B).
+LIBYUV_API
+int ARGBLumaColorTable(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ const uint8* luma_rgb_table,
+ int width, int height);
+
+// Apply a cubic polynomial to ARGB values.
+// poly points to a 4x4 matrix. The first row is constants. The 2nd row is
+// coefficients for b, g, r and a. The 3rd row is coefficients for b squared,
+// g squared, r squared and a squared. The 4th row is coefficients for b
+// cubed, g cubed, r cubed and a cubed. The values are summed and the result
+// is clamped to 0 to 255.
+// A polynomial approximation can be derived using software such as 'R'.
+
+LIBYUV_API
+int ARGBPolynomial(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ const float* poly,
+ int width, int height);
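+
+// Example (a hedged sketch): the identity polynomial, with zero constants,
+// linear coefficients of 1 and zero squared and cubed terms:
+//   static const float kIdentityPoly[16] = {
+//     0.f, 0.f, 0.f, 0.f,  // constants
+//     1.f, 1.f, 1.f, 1.f,  // b, g, r, a
+//     0.f, 0.f, 0.f, 0.f,  // squared terms
+//     0.f, 0.f, 0.f, 0.f,  // cubed terms
+//   };
+//   ARGBPolynomial(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
+//                  kIdentityPoly, width, height);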
+
+// Quantize a rectangle of ARGB. Alpha unaffected.
+// scale is a 16 bit fractional fixed point scaler between 0 and 65535.
+// interval_size should be a value between 1 and 255.
+// interval_offset should be a value between 0 and 255.
+LIBYUV_API
+int ARGBQuantize(uint8* dst_argb, int dst_stride_argb,
+ int scale, int interval_size, int interval_offset,
+ int x, int y, int width, int height);
+
+// Copy ARGB to ARGB.
+LIBYUV_API
+int ARGBCopy(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Copy the alpha channel of src ARGB to the alpha channel of dst ARGB.
+LIBYUV_API
+int ARGBCopyAlpha(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Copy a Y plane to the alpha channel of ARGB.
+LIBYUV_API
+int ARGBCopyYToAlpha(const uint8* src_y, int src_stride_y,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+typedef void (*ARGBBlendRow)(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+
+// Get a function to alpha blend ARGB pixels and store to destination.
+LIBYUV_API
+ARGBBlendRow GetARGBBlend();
+
+// Alpha Blend ARGB images and store to destination.
+// Alpha of destination is set to 255.
+LIBYUV_API
+int ARGBBlend(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Multiply ARGB image by ARGB image. Shifted down by 8. Saturates to 255.
+LIBYUV_API
+int ARGBMultiply(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Add ARGB image with ARGB image. Saturates to 255.
+LIBYUV_API
+int ARGBAdd(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Subtract ARGB image (argb1) from ARGB image (argb0). Saturates to 0.
+LIBYUV_API
+int ARGBSubtract(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert I422 to YUY2.
+LIBYUV_API
+int I422ToYUY2(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+// Convert I422 to UYVY.
+LIBYUV_API
+int I422ToUYVY(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_frame, int dst_stride_frame,
+ int width, int height);
+
+// Convert unattenuated ARGB to preattenuated ARGB.
+LIBYUV_API
+int ARGBAttenuate(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert preattenuated ARGB to unattenuated ARGB.
+LIBYUV_API
+int ARGBUnattenuate(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Convert MJPG to ARGB. w and h are the source dimensions; dw and dh are
+// the destination dimensions.
+LIBYUV_API
+int MJPGToARGB(const uint8* sample, size_t sample_size,
+ uint8* argb, int argb_stride,
+ int w, int h, int dw, int dh);
+
+// Internal function - do not call directly.
+// Computes a table of cumulative sums for the image, where each entry is the
+// sum of all values above and to the left of it. Used by ARGBBlur.
+LIBYUV_API
+int ARGBComputeCumulativeSum(const uint8* src_argb, int src_stride_argb,
+ int32* dst_cumsum, int dst_stride32_cumsum,
+ int width, int height);
+
+// Blur ARGB image.
+// dst_cumsum is a table of width * (height + 1) * 16 bytes, aligned to a
+// 16 byte boundary.
+// dst_stride32_cumsum is the number of ints in a row (width * 4).
+// radius is the number of pixels around the center, e.g. 1 = 3x3, 2 = 5x5.
+// Blur is optimized for a radius of 5 (11x11) or less.
+LIBYUV_API
+int ARGBBlur(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int32* dst_cumsum, int dst_stride32_cumsum,
+ int width, int height, int radius);
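+
+// Example (a minimal sketch; assumes malloc returns memory aligned to at
+// least 16 bytes on the target platform):
+//   int32* cumsum = (int32*)(malloc(width * (height + 1) * 16));
+//   ARGBBlur(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
+//            cumsum, width * 4, width, height, 2);  // radius 2 = 5x5.
+//   free(cumsum);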
+
+// Multiply ARGB image by ARGB value.
+LIBYUV_API
+int ARGBShade(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height, uint32 value);
+
+// Interpolate between two ARGB images using the specified amount of
+// interpolation (0 to 255) and store to destination.
+// 'interpolation' is an 8 bit fraction where 0 means 100% src_argb0 and
+// 255 means nearly 100% src_argb1.
+// Internally uses ARGBScale bilinear filtering.
+// Caveat: This function will write up to 16 bytes beyond the end of dst_argb.
+LIBYUV_API
+int ARGBInterpolate(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height, int interpolation);
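+
+// Example (a hedged sketch): a 50/50 cross-fade of two frames.
+//   ARGBInterpolate(frame0, stride0, frame1, stride1,
+//                   dst_argb, dst_stride_argb, width, height, 128);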
+
+#if defined(__pnacl__) || defined(__CLR_VER) || \
+ (defined(__i386__) && !defined(__SSE2__))
+#define LIBYUV_DISABLE_X86
+#endif
+// The following are available on all x86 platforms:
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
+#define HAS_ARGBAFFINEROW_SSE2
+#endif
+
+// Row function for copying pixels from a source with a slope to a row
+// of destination. Useful for scaling, rotation, mirroring and texture mapping.
+LIBYUV_API
+void ARGBAffineRow_C(const uint8* src_argb, int src_argb_stride,
+ uint8* dst_argb, const float* uv_dudv, int width);
+LIBYUV_API
+void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
+ uint8* dst_argb, const float* uv_dudv, int width);
+
+// Shuffle ARGB channel order. e.g. BGRA to ARGB.
+// shuffler is 16 bytes and must be aligned.
+LIBYUV_API
+int ARGBShuffle(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_argb, int dst_stride_argb,
+ const uint8* shuffler, int width, int height);
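+
+// Example (a hedged sketch): a shuffler that reverses the bytes of each
+// 4 byte pixel, e.g. BGRA to ARGB. In real code the table should be forced
+// to 16 byte alignment:
+//   static const uint8 kShuffleBGRAToARGB[16] = {
+//     3u, 2u, 1u, 0u, 7u, 6u, 5u, 4u, 11u, 10u, 9u, 8u, 15u, 14u, 13u, 12u
+//   };
+//   ARGBShuffle(src_bgra, src_stride_bgra, dst_argb, dst_stride_argb,
+//               kShuffleBGRAToARGB, width, height);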
+
+// Sobel ARGB effect with planar output.
+LIBYUV_API
+int ARGBSobelToPlane(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height);
+
+// Sobel ARGB effect.
+LIBYUV_API
+int ARGBSobel(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+// Sobel ARGB effect w/ Sobel X, Sobel, and Sobel Y stored in ARGB channels.
+LIBYUV_API
+int ARGBSobelXY(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_PLANAR_FUNCTIONS_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/rotate.h b/media/libaom/src/third_party/libyuv/include/libyuv/rotate.h
new file mode 100644
index 000000000..8a9673f28
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/rotate.h
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_ROTATE_H_ // NOLINT
+#define INCLUDE_LIBYUV_ROTATE_H_
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Supported rotation modes.
+typedef enum RotationMode {
+ kRotate0 = 0, // No rotation.
+ kRotate90 = 90, // Rotate 90 degrees clockwise.
+ kRotate180 = 180, // Rotate 180 degrees.
+ kRotate270 = 270, // Rotate 270 degrees clockwise.
+
+ // Deprecated.
+ kRotateNone = 0,
+ kRotateClockwise = 90,
+ kRotateCounterClockwise = 270,
+} RotationModeEnum;
+
+// Rotate I420 frame.
+LIBYUV_API
+int I420Rotate(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int src_width, int src_height, enum RotationMode mode);
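+
+// Example (a hedged sketch, assuming tightly packed planes): a 90 degree
+// rotation swaps the output dimensions, so dst strides derive from
+// src_height:
+//   I420Rotate(src_y, src_width, src_u, src_width / 2, src_v, src_width / 2,
+//              dst_y, src_height, dst_u, src_height / 2,
+//              dst_v, src_height / 2, src_width, src_height, kRotate90);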
+
+// Rotate NV12 input and store in I420.
+LIBYUV_API
+int NV12ToI420Rotate(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int src_width, int src_height, enum RotationMode mode);
+
+// Rotate a plane by 0, 90, 180, or 270.
+LIBYUV_API
+int RotatePlane(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int src_width, int src_height, enum RotationMode mode);
+
+// Rotate planes by 90, 180, 270. Deprecated.
+LIBYUV_API
+void RotatePlane90(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height);
+
+LIBYUV_API
+void RotatePlane180(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height);
+
+LIBYUV_API
+void RotatePlane270(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height);
+
+// Rotations for when U and V are interleaved.
+// These functions take one input pointer and
+// split the data into two buffers while
+// rotating them. Deprecated.
+LIBYUV_API
+void RotateUV90(const uint8* src, int src_stride,
+                uint8* dst_a, int dst_stride_a,
+                uint8* dst_b, int dst_stride_b,
+                int width, int height);
+
+LIBYUV_API
+void RotateUV180(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height);
+
+LIBYUV_API
+void RotateUV270(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height);
+
+// The 90 and 270 functions are based on transposes.
+// Doing a transpose while reversing the read/write
+// order results in a rotation by +- 90 degrees.
+// Deprecated.
+LIBYUV_API
+void TransposePlane(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height);
+
+LIBYUV_API
+void TransposeUV(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_ROTATE_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/rotate_argb.h b/media/libaom/src/third_party/libyuv/include/libyuv/rotate_argb.h
new file mode 100644
index 000000000..2bdc8ec6b
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/rotate_argb.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_ROTATE_ARGB_H_ // NOLINT
+#define INCLUDE_LIBYUV_ROTATE_ARGB_H_
+
+#include "libyuv/basic_types.h"
+#include "libyuv/rotate.h" // For RotationMode.
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Rotate ARGB frame.
+LIBYUV_API
+int ARGBRotate(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int src_width, int src_height, enum RotationMode mode);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_ROTATE_ARGB_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/rotate_row.h b/media/libaom/src/third_party/libyuv/include/libyuv/rotate_row.h
new file mode 100644
index 000000000..d0bfbdd2b
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/rotate_row.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_ROTATE_ROW_H_ // NOLINT
+#define INCLUDE_LIBYUV_ROTATE_ROW_H_
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#if defined(__pnacl__) || defined(__CLR_VER) || \
+ (defined(__i386__) && !defined(__SSE2__))
+#define LIBYUV_DISABLE_X86
+#endif
+
+// Visual C 2012 required for AVX2.
+#if defined(_M_IX86) && !defined(__clang__) && \
+ defined(_MSC_VER) && _MSC_VER >= 1700
+#define VISUALC_HAS_AVX2 1
+#endif // VisualStudio >= 2012
+
+// TODO(fbarchard): switch to standard form of inline; fails on clangcl.
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
+#if defined(__APPLE__) && defined(__i386__)
+#define DECLARE_FUNCTION(name) \
+ ".text \n" \
+ ".private_extern _" #name " \n" \
+ ".align 4,0x90 \n" \
+"_" #name ": \n"
+#elif defined(__MINGW32__) || defined(__CYGWIN__) && defined(__i386__)
+#define DECLARE_FUNCTION(name) \
+ ".text \n" \
+ ".align 4,0x90 \n" \
+"_" #name ": \n"
+#else
+#define DECLARE_FUNCTION(name) \
+ ".text \n" \
+ ".align 4,0x90 \n" \
+#name ": \n"
+#endif
+#endif
+
+// The following are available for Visual C:
+#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \
+ defined(_MSC_VER) && !defined(__clang__)
+#define HAS_TRANSPOSEWX8_SSSE3
+#define HAS_TRANSPOSEUVWX8_SSE2
+#endif
+
+// The following are available for GCC but not NaCL:
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(__i386__) || (defined(__x86_64__) && !defined(__native_client__)))
+#define HAS_TRANSPOSEWX8_SSSE3
+#endif
+
+// The following are available for 32 bit GCC:
+#if !defined(LIBYUV_DISABLE_X86) && defined(__i386__) && !defined(__clang__)
+#define HAS_TRANSPOSEUVWX8_SSE2
+#endif
+
+// The following are available for 64 bit GCC but not NaCL:
+#if !defined(LIBYUV_DISABLE_X86) && !defined(__native_client__) && \
+ defined(__x86_64__)
+#define HAS_TRANSPOSEWX8_FAST_SSSE3
+#define HAS_TRANSPOSEUVWX8_SSE2
+#endif
+
+#if !defined(LIBYUV_DISABLE_NEON) && !defined(__native_client__) && \
+ (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
+#define HAS_TRANSPOSEWX8_NEON
+#define HAS_TRANSPOSEUVWX8_NEON
+#endif
+
+#if !defined(LIBYUV_DISABLE_MIPS) && !defined(__native_client__) && \
+ defined(__mips__) && \
+ defined(__mips_dsp) && (__mips_dsp_rev >= 2)
+#define HAS_TRANSPOSEWX8_MIPS_DSPR2
+#define HAS_TRANSPOSEUVWx8_MIPS_DSPR2
+#endif // defined(__mips__)
+
+void TransposeWxH_C(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width, int height);
+
+void TransposeWx8_C(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+void TransposeWx8_NEON(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+void TransposeWx8_SSSE3(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+void TransposeWx8_Fast_SSSE3(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+
+void TransposeWx8_Any_NEON(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+void TransposeWx8_Any_SSSE3(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+void TransposeWx8_Fast_Any_SSSE3(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+void TransposeWx8_Any_MIPS_DSPR2(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width);
+
+void TransposeUVWxH_C(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height);
+
+void TransposeUVWx8_C(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width);
+void TransposeUVWx8_SSE2(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width);
+void TransposeUVWx8_NEON(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width);
+void TransposeUVWx8_MIPS_DSPR2(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_ROTATE_ROW_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/row.h b/media/libaom/src/third_party/libyuv/include/libyuv/row.h
new file mode 100644
index 000000000..5c3187ef7
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/row.h
@@ -0,0 +1,1857 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_ROW_H_ // NOLINT
+#define INCLUDE_LIBYUV_ROW_H_
+
+#include <stdlib.h> // For malloc.
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#define IS_ALIGNED(p, a) (!((uintptr_t)(p) & ((a) - 1)))
+
+#ifdef __cplusplus
+#define align_buffer_64(var, size) \
+ uint8* var##_mem = reinterpret_cast<uint8*>(malloc((size) + 63)); \
+ uint8* var = reinterpret_cast<uint8*> \
+ ((reinterpret_cast<intptr_t>(var##_mem) + 63) & ~63)
+#else
+#define align_buffer_64(var, size) \
+ uint8* var##_mem = (uint8*)(malloc((size) + 63)); /* NOLINT */ \
+ uint8* var = (uint8*)(((intptr_t)(var##_mem) + 63) & ~63) /* NOLINT */
+#endif
+
+#define free_aligned_buffer_64(var) \
+ free(var##_mem); \
+ var = 0
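+
+// Example (a minimal usage sketch): a 64 byte aligned 1024 byte scratch row.
+//   align_buffer_64(row, 1024);  // Declares row and its backing row_mem.
+//   // ... use row ...
+//   free_aligned_buffer_64(row);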
+
+#if defined(__pnacl__) || defined(__CLR_VER) || \
+ (defined(__i386__) && !defined(__SSE2__))
+#define LIBYUV_DISABLE_X86
+#endif
+// True if compiling for SSSE3 as a requirement.
+#if defined(__SSSE3__) || (defined(_M_IX86_FP) && (_M_IX86_FP >= 3))
+#define LIBYUV_SSSE3_ONLY
+#endif
+
+#if defined(__native_client__)
+#define LIBYUV_DISABLE_NEON
+#endif
+// clang >= 3.5.0 required for Arm64.
+#if defined(__clang__) && defined(__aarch64__) && !defined(LIBYUV_DISABLE_NEON)
+#if (__clang_major__ < 3) || (__clang_major__ == 3 && (__clang_minor__ < 5))
+#define LIBYUV_DISABLE_NEON
+#endif // clang >= 3.5
+#endif // __clang__
+
+// The following are available on all x86 platforms:
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
+// Conversions:
+#define HAS_ABGRTOUVROW_SSSE3
+#define HAS_ABGRTOYROW_SSSE3
+#define HAS_ARGB1555TOARGBROW_SSE2
+#define HAS_ARGB4444TOARGBROW_SSE2
+#define HAS_ARGBSETROW_X86
+#define HAS_ARGBSHUFFLEROW_SSE2
+#define HAS_ARGBSHUFFLEROW_SSSE3
+#define HAS_ARGBTOARGB1555ROW_SSE2
+#define HAS_ARGBTOARGB4444ROW_SSE2
+#define HAS_ARGBTORAWROW_SSSE3
+#define HAS_ARGBTORGB24ROW_SSSE3
+#define HAS_ARGBTORGB565ROW_SSE2
+#define HAS_ARGBTOUV422ROW_SSSE3
+#define HAS_ARGBTOUV444ROW_SSSE3
+#define HAS_ARGBTOUVJROW_SSSE3
+#define HAS_ARGBTOUVROW_SSSE3
+#define HAS_ARGBTOYJROW_SSSE3
+#define HAS_ARGBTOYROW_SSSE3
+#define HAS_BGRATOUVROW_SSSE3
+#define HAS_BGRATOYROW_SSSE3
+#define HAS_COPYROW_ERMS
+#define HAS_COPYROW_SSE2
+#define HAS_I400TOARGBROW_SSE2
+#define HAS_I411TOARGBROW_SSSE3
+#define HAS_I422TOABGRROW_SSSE3
+#define HAS_I422TOARGB1555ROW_SSSE3
+#define HAS_I422TOARGB4444ROW_SSSE3
+#define HAS_I422TOARGBROW_SSSE3
+#define HAS_I422TOBGRAROW_SSSE3
+#define HAS_I422TORAWROW_SSSE3
+#define HAS_I422TORGB24ROW_SSSE3
+#define HAS_I422TORGB565ROW_SSSE3
+#define HAS_I422TORGBAROW_SSSE3
+#define HAS_I422TOUYVYROW_SSE2
+#define HAS_I422TOYUY2ROW_SSE2
+#define HAS_I444TOARGBROW_SSSE3
+#define HAS_J400TOARGBROW_SSE2
+#define HAS_J422TOARGBROW_SSSE3
+#define HAS_MERGEUVROW_SSE2
+#define HAS_MIRRORROW_SSE2
+#define HAS_MIRRORROW_SSSE3
+#define HAS_MIRRORROW_UV_SSSE3
+#define HAS_MIRRORUVROW_SSSE3
+#define HAS_NV12TOARGBROW_SSSE3
+#define HAS_NV12TORGB565ROW_SSSE3
+#define HAS_NV21TOARGBROW_SSSE3
+#define HAS_NV21TORGB565ROW_SSSE3
+#define HAS_RAWTOARGBROW_SSSE3
+#define HAS_RAWTOYROW_SSSE3
+#define HAS_RGB24TOARGBROW_SSSE3
+#define HAS_RGB24TOYROW_SSSE3
+#define HAS_RGB565TOARGBROW_SSE2
+#define HAS_RGBATOUVROW_SSSE3
+#define HAS_RGBATOYROW_SSSE3
+#define HAS_SETROW_ERMS
+#define HAS_SETROW_X86
+#define HAS_SPLITUVROW_SSE2
+#define HAS_UYVYTOARGBROW_SSSE3
+#define HAS_UYVYTOUV422ROW_SSE2
+#define HAS_UYVYTOUVROW_SSE2
+#define HAS_UYVYTOYROW_SSE2
+#define HAS_YUY2TOARGBROW_SSSE3
+#define HAS_YUY2TOUV422ROW_SSE2
+#define HAS_YUY2TOUVROW_SSE2
+#define HAS_YUY2TOYROW_SSE2
+
+// Effects:
+#define HAS_ARGBADDROW_SSE2
+#define HAS_ARGBAFFINEROW_SSE2
+#define HAS_ARGBATTENUATEROW_SSSE3
+#define HAS_ARGBBLENDROW_SSSE3
+#define HAS_ARGBCOLORMATRIXROW_SSSE3
+#define HAS_ARGBCOLORTABLEROW_X86
+#define HAS_ARGBCOPYALPHAROW_SSE2
+#define HAS_ARGBCOPYYTOALPHAROW_SSE2
+#define HAS_ARGBGRAYROW_SSSE3
+#define HAS_ARGBLUMACOLORTABLEROW_SSSE3
+#define HAS_ARGBMIRRORROW_SSE2
+#define HAS_ARGBMULTIPLYROW_SSE2
+#define HAS_ARGBPOLYNOMIALROW_SSE2
+#define HAS_ARGBQUANTIZEROW_SSE2
+#define HAS_ARGBSEPIAROW_SSSE3
+#define HAS_ARGBSHADEROW_SSE2
+#define HAS_ARGBSUBTRACTROW_SSE2
+#define HAS_ARGBUNATTENUATEROW_SSE2
+#define HAS_COMPUTECUMULATIVESUMROW_SSE2
+#define HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
+#define HAS_INTERPOLATEROW_SSE2
+#define HAS_INTERPOLATEROW_SSSE3
+#define HAS_RGBCOLORTABLEROW_X86
+#define HAS_SOBELROW_SSE2
+#define HAS_SOBELTOPLANEROW_SSE2
+#define HAS_SOBELXROW_SSE2
+#define HAS_SOBELXYROW_SSE2
+#define HAS_SOBELYROW_SSE2
+#endif
+
+// The following are available on x64 Visual C and clangcl.
+#if !defined(LIBYUV_DISABLE_X86) && defined (_M_X64) && \
+ (!defined(__clang__) || defined(__SSSE3__))
+#define HAS_I422TOARGBROW_SSSE3
+#endif
+
+// GCC >= 4.7.0 required for AVX2.
+#if defined(__GNUC__) && (defined(__x86_64__) || defined(__i386__))
+#if (__GNUC__ > 4) || (__GNUC__ == 4 && (__GNUC_MINOR__ >= 7))
+#define GCC_HAS_AVX2 1
+#endif // GNUC >= 4.7
+#endif // __GNUC__
+
+// clang >= 3.4.0 required for AVX2.
+#if defined(__clang__) && (defined(__x86_64__) || defined(__i386__))
+#if (__clang_major__ > 3) || (__clang_major__ == 3 && (__clang_minor__ >= 4))
+#define CLANG_HAS_AVX2 1
+#endif // clang >= 3.4
+#endif // __clang__
+
+// Visual C 2012 required for AVX2.
+#if defined(_M_IX86) && !defined(__clang__) && \
+ defined(_MSC_VER) && _MSC_VER >= 1700
+#define VISUALC_HAS_AVX2 1
+#endif // VisualStudio >= 2012
+
+// The following are available but require VS2012. Port to GCC.
+#if !defined(LIBYUV_DISABLE_X86) && defined(VISUALC_HAS_AVX2)
+#define HAS_ARGB1555TOARGBROW_AVX2
+#define HAS_ARGB4444TOARGBROW_AVX2
+#define HAS_ARGBTOARGB1555ROW_AVX2
+#define HAS_ARGBTOARGB4444ROW_AVX2
+#define HAS_ARGBTORGB565DITHERROW_AVX2
+#define HAS_ARGBTORGB565DITHERROW_SSE2
+#define HAS_ARGBTORGB565ROW_AVX2
+#define HAS_I411TOARGBROW_AVX2
+#define HAS_I422TOARGB1555ROW_AVX2
+#define HAS_I422TOARGB4444ROW_AVX2
+#define HAS_I422TORGB565ROW_AVX2
+#define HAS_I444TOARGBROW_AVX2
+#define HAS_J400TOARGBROW_AVX2
+#define HAS_NV12TOARGBROW_AVX2
+#define HAS_NV12TORGB565ROW_AVX2
+#define HAS_NV21TOARGBROW_AVX2
+#define HAS_NV21TORGB565ROW_AVX2
+#define HAS_RGB565TOARGBROW_AVX2
+#endif
+
+// The following are available on all x86 platforms, but
+// require VS2012, clang 3.4 or gcc 4.7.
+// The code supports NaCL but requires a new compiler and validator.
+#if !defined(LIBYUV_DISABLE_X86) && (defined(VISUALC_HAS_AVX2) || \
+ defined(CLANG_HAS_AVX2) || defined(GCC_HAS_AVX2))
+#define HAS_ARGBCOPYALPHAROW_AVX2
+#define HAS_ARGBCOPYYTOALPHAROW_AVX2
+#define HAS_ARGBMIRRORROW_AVX2
+#define HAS_ARGBPOLYNOMIALROW_AVX2
+#define HAS_ARGBSHUFFLEROW_AVX2
+#define HAS_ARGBTOUVROW_AVX2
+#define HAS_ARGBTOYJROW_AVX2
+#define HAS_ARGBTOYROW_AVX2
+#define HAS_COPYROW_AVX
+#define HAS_I400TOARGBROW_AVX2
+#define HAS_I422TOABGRROW_AVX2
+#define HAS_I422TOARGBROW_AVX2
+#define HAS_I422TOBGRAROW_AVX2
+#define HAS_I422TORAWROW_AVX2
+#define HAS_I422TORGB24ROW_AVX2
+#define HAS_I422TORGBAROW_AVX2
+#define HAS_INTERPOLATEROW_AVX2
+#define HAS_J422TOARGBROW_AVX2
+#define HAS_MERGEUVROW_AVX2
+#define HAS_MIRRORROW_AVX2
+#define HAS_SPLITUVROW_AVX2
+#define HAS_UYVYTOARGBROW_AVX2
+#define HAS_UYVYTOUV422ROW_AVX2
+#define HAS_UYVYTOUVROW_AVX2
+#define HAS_UYVYTOYROW_AVX2
+#define HAS_YUY2TOARGBROW_AVX2
+#define HAS_YUY2TOUV422ROW_AVX2
+#define HAS_YUY2TOUVROW_AVX2
+#define HAS_YUY2TOYROW_AVX2
+
+// Effects:
+#define HAS_ARGBADDROW_AVX2
+#define HAS_ARGBATTENUATEROW_AVX2
+#define HAS_ARGBMULTIPLYROW_AVX2
+#define HAS_ARGBSUBTRACTROW_AVX2
+#define HAS_ARGBUNATTENUATEROW_AVX2
+#endif
+
+// The following are disabled when SSSE3 is a compile time requirement:
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__)) && \
+ !defined(LIBYUV_SSSE3_ONLY)
+#define HAS_ARGBATTENUATEROW_SSE2
+#define HAS_ARGBBLENDROW_SSE2
+#define HAS_MIRRORROW_SSE2
+#endif
+
+// The following are available on Neon platforms:
+#if !defined(LIBYUV_DISABLE_NEON) && \
+ (defined(__aarch64__) || defined(__ARM_NEON__) || defined(LIBYUV_NEON))
+#define HAS_ABGRTOUVROW_NEON
+#define HAS_ABGRTOYROW_NEON
+#define HAS_ARGB1555TOARGBROW_NEON
+#define HAS_ARGB1555TOUVROW_NEON
+#define HAS_ARGB1555TOYROW_NEON
+#define HAS_ARGB4444TOARGBROW_NEON
+#define HAS_ARGB4444TOUVROW_NEON
+#define HAS_ARGB4444TOYROW_NEON
+#define HAS_ARGBTOARGB1555ROW_NEON
+#define HAS_ARGBTOARGB4444ROW_NEON
+#define HAS_ARGBTORAWROW_NEON
+#define HAS_ARGBTORGB24ROW_NEON
+#define HAS_ARGBTORGB565ROW_NEON
+#define HAS_ARGBTOUV411ROW_NEON
+#define HAS_ARGBTOUV422ROW_NEON
+#define HAS_ARGBTOUV444ROW_NEON
+#define HAS_ARGBTOUVJROW_NEON
+#define HAS_ARGBTOUVROW_NEON
+#define HAS_ARGBTOYJROW_NEON
+#define HAS_ARGBTOYROW_NEON
+#define HAS_BGRATOUVROW_NEON
+#define HAS_BGRATOYROW_NEON
+#define HAS_COPYROW_NEON
+#define HAS_J400TOARGBROW_NEON
+#define HAS_I411TOARGBROW_NEON
+#define HAS_I422TOABGRROW_NEON
+#define HAS_I422TOARGB1555ROW_NEON
+#define HAS_I422TOARGB4444ROW_NEON
+#define HAS_I422TOARGBROW_NEON
+#define HAS_I422TOBGRAROW_NEON
+#define HAS_I422TORAWROW_NEON
+#define HAS_I422TORGB24ROW_NEON
+#define HAS_I422TORGB565ROW_NEON
+#define HAS_I422TORGBAROW_NEON
+#define HAS_I422TOUYVYROW_NEON
+#define HAS_I422TOYUY2ROW_NEON
+#define HAS_I444TOARGBROW_NEON
+#define HAS_MERGEUVROW_NEON
+#define HAS_MIRRORROW_NEON
+#define HAS_MIRRORUVROW_NEON
+#define HAS_NV12TOARGBROW_NEON
+#define HAS_NV12TORGB565ROW_NEON
+#define HAS_NV21TOARGBROW_NEON
+#define HAS_NV21TORGB565ROW_NEON
+#define HAS_RAWTOARGBROW_NEON
+#define HAS_RAWTOUVROW_NEON
+#define HAS_RAWTOYROW_NEON
+#define HAS_RGB24TOARGBROW_NEON
+#define HAS_RGB24TOUVROW_NEON
+#define HAS_RGB24TOYROW_NEON
+#define HAS_RGB565TOARGBROW_NEON
+#define HAS_RGB565TOUVROW_NEON
+#define HAS_RGB565TOYROW_NEON
+#define HAS_RGBATOUVROW_NEON
+#define HAS_RGBATOYROW_NEON
+#define HAS_SETROW_NEON
+#define HAS_ARGBSETROW_NEON
+#define HAS_SPLITUVROW_NEON
+#define HAS_UYVYTOARGBROW_NEON
+#define HAS_UYVYTOUV422ROW_NEON
+#define HAS_UYVYTOUVROW_NEON
+#define HAS_UYVYTOYROW_NEON
+#define HAS_I400TOARGBROW_NEON
+#define HAS_YUY2TOARGBROW_NEON
+#define HAS_YUY2TOUV422ROW_NEON
+#define HAS_YUY2TOUVROW_NEON
+#define HAS_YUY2TOYROW_NEON
+#define HAS_ARGBTORGB565DITHERROW_NEON
+
+// Effects:
+#define HAS_ARGBADDROW_NEON
+#define HAS_ARGBATTENUATEROW_NEON
+#define HAS_ARGBBLENDROW_NEON
+#define HAS_ARGBGRAYROW_NEON
+#define HAS_ARGBMIRRORROW_NEON
+#define HAS_ARGBMULTIPLYROW_NEON
+#define HAS_ARGBQUANTIZEROW_NEON
+#define HAS_ARGBSEPIAROW_NEON
+#define HAS_ARGBSHADEROW_NEON
+#define HAS_ARGBSUBTRACTROW_NEON
+#define HAS_INTERPOLATEROW_NEON
+#define HAS_SOBELROW_NEON
+#define HAS_SOBELTOPLANEROW_NEON
+#define HAS_SOBELXROW_NEON
+#define HAS_SOBELXYROW_NEON
+#define HAS_SOBELYROW_NEON
+#define HAS_ARGBCOLORMATRIXROW_NEON
+#define HAS_ARGBSHUFFLEROW_NEON
+#endif
+
+// The following are available on Mips platforms:
+#if !defined(LIBYUV_DISABLE_MIPS) && defined(__mips__) && \
+ (_MIPS_SIM == _MIPS_SIM_ABI32) && (__mips_isa_rev < 6)
+#define HAS_COPYROW_MIPS
+#if defined(__mips_dsp) && (__mips_dsp_rev >= 2)
+#define HAS_I422TOABGRROW_MIPS_DSPR2
+#define HAS_I422TOARGBROW_MIPS_DSPR2
+#define HAS_I422TOBGRAROW_MIPS_DSPR2
+#define HAS_INTERPOLATEROW_MIPS_DSPR2
+#define HAS_MIRRORROW_MIPS_DSPR2
+#define HAS_MIRRORUVROW_MIPS_DSPR2
+#define HAS_SPLITUVROW_MIPS_DSPR2
+#endif
+#endif
+
+#if defined(_MSC_VER) && !defined(__CLR_VER)
+#define SIMD_ALIGNED(var) __declspec(align(16)) var
+#define SIMD_ALIGNED32(var) __declspec(align(64)) var
+typedef __declspec(align(16)) int16 vec16[8];
+typedef __declspec(align(16)) int32 vec32[4];
+typedef __declspec(align(16)) int8 vec8[16];
+typedef __declspec(align(16)) uint16 uvec16[8];
+typedef __declspec(align(16)) uint32 uvec32[4];
+typedef __declspec(align(16)) uint8 uvec8[16];
+typedef __declspec(align(32)) int16 lvec16[16];
+typedef __declspec(align(32)) int32 lvec32[8];
+typedef __declspec(align(32)) int8 lvec8[32];
+typedef __declspec(align(32)) uint16 ulvec16[16];
+typedef __declspec(align(32)) uint32 ulvec32[8];
+typedef __declspec(align(32)) uint8 ulvec8[32];
+#elif defined(__GNUC__)
+// Caveat: GCC 4.2 to 4.7 have a known issue using vectors with const.
+#define SIMD_ALIGNED(var) var __attribute__((aligned(16)))
+#define SIMD_ALIGNED32(var) var __attribute__((aligned(64)))
+typedef int16 __attribute__((vector_size(16))) vec16;
+typedef int32 __attribute__((vector_size(16))) vec32;
+typedef int8 __attribute__((vector_size(16))) vec8;
+typedef uint16 __attribute__((vector_size(16))) uvec16;
+typedef uint32 __attribute__((vector_size(16))) uvec32;
+typedef uint8 __attribute__((vector_size(16))) uvec8;
+typedef int16 __attribute__((vector_size(32))) lvec16;
+typedef int32 __attribute__((vector_size(32))) lvec32;
+typedef int8 __attribute__((vector_size(32))) lvec8;
+typedef uint16 __attribute__((vector_size(32))) ulvec16;
+typedef uint32 __attribute__((vector_size(32))) ulvec32;
+typedef uint8 __attribute__((vector_size(32))) ulvec8;
+#else
+#define SIMD_ALIGNED(var) var
+#define SIMD_ALIGNED32(var) var
+typedef int16 vec16[8];
+typedef int32 vec32[4];
+typedef int8 vec8[16];
+typedef uint16 uvec16[8];
+typedef uint32 uvec32[4];
+typedef uint8 uvec8[16];
+typedef int16 lvec16[16];
+typedef int32 lvec32[8];
+typedef int8 lvec8[32];
+typedef uint16 ulvec16[16];
+typedef uint32 ulvec32[8];
+typedef uint8 ulvec8[32];
+#endif
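+
+// Example (a hedged sketch): SIMD_ALIGNED declares a 16 byte aligned
+// variable with any of the compilers handled above.
+//   static SIMD_ALIGNED(const uint8 kZeroRow[16]) = { 0 };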
+
+#if defined(__APPLE__) || defined(__x86_64__) || defined(__llvm__)
+#define OMITFP
+#else
+#define OMITFP __attribute__((optimize("omit-frame-pointer")))
+#endif
+
+// NaCL macros for GCC x86 and x64.
+#if defined(__native_client__)
+#define LABELALIGN ".p2align 5\n"
+#else
+#define LABELALIGN
+#endif
+#if defined(__native_client__) && defined(__x86_64__)
+// r14 is used for MEMOP macros.
+#define NACL_R14 "r14",
+#define BUNDLELOCK ".bundle_lock\n"
+#define BUNDLEUNLOCK ".bundle_unlock\n"
+#define MEMACCESS(base) "%%nacl:(%%r15,%q" #base ")"
+#define MEMACCESS2(offset, base) "%%nacl:" #offset "(%%r15,%q" #base ")"
+#define MEMLEA(offset, base) #offset "(%q" #base ")"
+#define MEMLEA3(offset, index, scale) \
+ #offset "(,%q" #index "," #scale ")"
+#define MEMLEA4(offset, base, index, scale) \
+ #offset "(%q" #base ",%q" #index "," #scale ")"
+#define MEMMOVESTRING(s, d) "%%nacl:(%q" #s "),%%nacl:(%q" #d "), %%r15"
+#define MEMSTORESTRING(reg, d) "%%" #reg ",%%nacl:(%q" #d "), %%r15"
+#define MEMOPREG(opcode, offset, base, index, scale, reg) \
+ BUNDLELOCK \
+ "lea " #offset "(%q" #base ",%q" #index "," #scale "),%%r14d\n" \
+ #opcode " (%%r15,%%r14),%%" #reg "\n" \
+ BUNDLEUNLOCK
+#define MEMOPMEM(opcode, reg, offset, base, index, scale) \
+ BUNDLELOCK \
+ "lea " #offset "(%q" #base ",%q" #index "," #scale "),%%r14d\n" \
+ #opcode " %%" #reg ",(%%r15,%%r14)\n" \
+ BUNDLEUNLOCK
+#define MEMOPARG(opcode, offset, base, index, scale, arg) \
+ BUNDLELOCK \
+ "lea " #offset "(%q" #base ",%q" #index "," #scale "),%%r14d\n" \
+ #opcode " (%%r15,%%r14),%" #arg "\n" \
+ BUNDLEUNLOCK
+#define VMEMOPREG(opcode, offset, base, index, scale, reg1, reg2) \
+ BUNDLELOCK \
+ "lea " #offset "(%q" #base ",%q" #index "," #scale "),%%r14d\n" \
+ #opcode " (%%r15,%%r14),%%" #reg1 ",%%" #reg2 "\n" \
+ BUNDLEUNLOCK
+#define VEXTOPMEM(op, sel, reg, offset, base, index, scale) \
+ BUNDLELOCK \
+ "lea " #offset "(%q" #base ",%q" #index "," #scale "),%%r14d\n" \
+ #op " $" #sel ",%%" #reg ",(%%r15,%%r14)\n" \
+ BUNDLEUNLOCK
+#else // defined(__native_client__) && defined(__x86_64__)
+#define NACL_R14
+#define BUNDLEALIGN
+#define MEMACCESS(base) "(%" #base ")"
+#define MEMACCESS2(offset, base) #offset "(%" #base ")"
+#define MEMLEA(offset, base) #offset "(%" #base ")"
+#define MEMLEA3(offset, index, scale) \
+ #offset "(,%" #index "," #scale ")"
+#define MEMLEA4(offset, base, index, scale) \
+ #offset "(%" #base ",%" #index "," #scale ")"
+#define MEMMOVESTRING(s, d)
+#define MEMSTORESTRING(reg, d)
+#define MEMOPREG(opcode, offset, base, index, scale, reg) \
+ #opcode " " #offset "(%" #base ",%" #index "," #scale "),%%" #reg "\n"
+#define MEMOPMEM(opcode, reg, offset, base, index, scale) \
+ #opcode " %%" #reg ","#offset "(%" #base ",%" #index "," #scale ")\n"
+#define MEMOPARG(opcode, offset, base, index, scale, arg) \
+ #opcode " " #offset "(%" #base ",%" #index "," #scale "),%" #arg "\n"
+#define VMEMOPREG(opcode, offset, base, index, scale, reg1, reg2) \
+ #opcode " " #offset "(%" #base ",%" #index "," #scale "),%%" #reg1 ",%%" \
+ #reg2 "\n"
+#define VEXTOPMEM(op, sel, reg, offset, base, index, scale) \
+ #op " $" #sel ",%%" #reg ","#offset "(%" #base ",%" #index "," #scale ")\n"
+#endif // defined(__native_client__) && defined(__x86_64__)
+
+#if defined(__arm__) || defined(__aarch64__)
+#undef MEMACCESS
+#if defined(__native_client__)
+#define MEMACCESS(base) ".p2align 3\nbic %" #base ", #0xc0000000\n"
+#else
+#define MEMACCESS(base)
+#endif
+#endif
+
+void I444ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I411ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToBGRARow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_bgra,
+ int width);
+void I422ToABGRRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_abgr,
+ int width);
+void I422ToRGBARow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgba,
+ int width);
+void I422ToRGB24Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb24,
+ int width);
+void I422ToRAWRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_raw,
+ int width);
+void I422ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb565,
+ int width);
+void I422ToARGB1555Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb1555,
+ int width);
+void I422ToARGB4444Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb4444,
+ int width);
+void NV12ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void NV12ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_rgb565,
+ int width);
+void NV21ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_rgb565,
+ int width);
+void YUY2ToARGBRow_NEON(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width);
+void UYVYToARGBRow_NEON(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width);
+
+void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYRow_Any_AVX2(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYJRow_Any_AVX2(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix);
+void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int pix);
+void ABGRToYRow_SSSE3(const uint8* src_abgr, uint8* dst_y, int pix);
+void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int pix);
+void RGB24ToYRow_SSSE3(const uint8* src_rgb24, uint8* dst_y, int pix);
+void RAWToYRow_SSSE3(const uint8* src_raw, uint8* dst_y, int pix);
+void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix);
+void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix);
+void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix);
+void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int pix);
+void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int pix);
+void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int pix);
+void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24,
+ uint8* dst_u, uint8* dst_v, int pix);
+void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw,
+ uint8* dst_u, uint8* dst_v, int pix);
+void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444,
+ uint8* dst_u, uint8* dst_v, int pix);
+void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix);
+void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix);
+void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix);
+void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int pix);
+void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix);
+void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix);
+void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int pix);
+void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int pix);
+void ARGBToYRow_C(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYJRow_C(const uint8* src_argb, uint8* dst_y, int pix);
+void BGRAToYRow_C(const uint8* src_bgra, uint8* dst_y, int pix);
+void ABGRToYRow_C(const uint8* src_abgr, uint8* dst_y, int pix);
+void RGBAToYRow_C(const uint8* src_rgba, uint8* dst_y, int pix);
+void RGB24ToYRow_C(const uint8* src_rgb24, uint8* dst_y, int pix);
+void RAWToYRow_C(const uint8* src_raw, uint8* dst_y, int pix);
+void RGB565ToYRow_C(const uint8* src_rgb565, uint8* dst_y, int pix);
+void ARGB1555ToYRow_C(const uint8* src_argb1555, uint8* dst_y, int pix);
+void ARGB4444ToYRow_C(const uint8* src_argb4444, uint8* dst_y, int pix);
+void ARGBToYRow_Any_SSSE3(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYJRow_Any_SSSE3(const uint8* src_argb, uint8* dst_y, int pix);
+void BGRAToYRow_Any_SSSE3(const uint8* src_bgra, uint8* dst_y, int pix);
+void ABGRToYRow_Any_SSSE3(const uint8* src_abgr, uint8* dst_y, int pix);
+void RGBAToYRow_Any_SSSE3(const uint8* src_rgba, uint8* dst_y, int pix);
+void RGB24ToYRow_Any_SSSE3(const uint8* src_rgb24, uint8* dst_y, int pix);
+void RAWToYRow_Any_SSSE3(const uint8* src_raw, uint8* dst_y, int pix);
+void ARGBToYRow_Any_NEON(const uint8* src_argb, uint8* dst_y, int pix);
+void ARGBToYJRow_Any_NEON(const uint8* src_argb, uint8* dst_y, int pix);
+void BGRAToYRow_Any_NEON(const uint8* src_bgra, uint8* dst_y, int pix);
+void ABGRToYRow_Any_NEON(const uint8* src_abgr, uint8* dst_y, int pix);
+void RGBAToYRow_Any_NEON(const uint8* src_rgba, uint8* dst_y, int pix);
+void RGB24ToYRow_Any_NEON(const uint8* src_rgb24, uint8* dst_y, int pix);
+void RAWToYRow_Any_NEON(const uint8* src_raw, uint8* dst_y, int pix);
+void RGB565ToYRow_Any_NEON(const uint8* src_rgb565, uint8* dst_y, int pix);
+void ARGB1555ToYRow_Any_NEON(const uint8* src_argb1555, uint8* dst_y, int pix);
+void ARGB4444ToYRow_Any_NEON(const uint8* src_argb4444, uint8* dst_y, int pix);
+
+void ARGBToUVRow_AVX2(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVRow_Any_AVX2(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVRow_SSSE3(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVJRow_SSSE3(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void BGRAToUVRow_SSSE3(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int width);
+void ABGRToUVRow_SSSE3(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int width);
+void RGBAToUVRow_SSSE3(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVRow_Any_SSSE3(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVJRow_Any_SSSE3(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void BGRAToUVRow_Any_SSSE3(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int width);
+void ABGRToUVRow_Any_SSSE3(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int width);
+void RGBAToUVRow_Any_SSSE3(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUV444Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix);
+void ARGBToUV422Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix);
+void ARGBToUV411Row_Any_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix);
+void ARGBToUVRow_Any_NEON(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ARGBToUVJRow_Any_NEON(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int pix);
+void BGRAToUVRow_Any_NEON(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ABGRToUVRow_Any_NEON(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int pix);
+void RGBAToUVRow_Any_NEON(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int pix);
+void RGB24ToUVRow_Any_NEON(const uint8* src_rgb24, int src_stride_rgb24,
+ uint8* dst_u, uint8* dst_v, int pix);
+void RAWToUVRow_Any_NEON(const uint8* src_raw, int src_stride_raw,
+ uint8* dst_u, uint8* dst_v, int pix);
+void RGB565ToUVRow_Any_NEON(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ARGB1555ToUVRow_Any_NEON(const uint8* src_argb1555,
+ int src_stride_argb1555,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ARGB4444ToUVRow_Any_NEON(const uint8* src_argb4444,
+ int src_stride_argb4444,
+ uint8* dst_u, uint8* dst_v, int pix);
+void ARGBToUVRow_C(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVJRow_C(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void BGRAToUVRow_C(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int width);
+void ABGRToUVRow_C(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int width);
+void RGBAToUVRow_C(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int width);
+void RGB24ToUVRow_C(const uint8* src_rgb24, int src_stride_rgb24,
+ uint8* dst_u, uint8* dst_v, int width);
+void RAWToUVRow_C(const uint8* src_raw, int src_stride_raw,
+ uint8* dst_u, uint8* dst_v, int width);
+void RGB565ToUVRow_C(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGB1555ToUVRow_C(const uint8* src_argb1555, int src_stride_argb1555,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGB4444ToUVRow_C(const uint8* src_argb4444, int src_stride_argb4444,
+ uint8* dst_u, uint8* dst_v, int width);
+
+void ARGBToUV444Row_SSSE3(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUV444Row_Any_SSSE3(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+
+void ARGBToUV422Row_SSSE3(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUV422Row_Any_SSSE3(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+
+void ARGBToUV444Row_C(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUV422Row_C(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUV411Row_C(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+void ARGBToUVJ422Row_C(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width);
+
+void MirrorRow_AVX2(const uint8* src, uint8* dst, int width);
+void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width);
+void MirrorRow_SSE2(const uint8* src, uint8* dst, int width);
+void MirrorRow_NEON(const uint8* src, uint8* dst, int width);
+void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width);
+void MirrorRow_C(const uint8* src, uint8* dst, int width);
+void MirrorRow_Any_AVX2(const uint8* src, uint8* dst, int width);
+void MirrorRow_Any_SSSE3(const uint8* src, uint8* dst, int width);
+void MirrorRow_Any_SSE2(const uint8* src, uint8* dst, int width);
+void MirrorRow_Any_NEON(const uint8* src, uint8* dst, int width);
+
+void MirrorUVRow_SSSE3(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width);
+void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width);
+void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width);
+void MirrorUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width);
+
+void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width);
+void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width);
+void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width);
+void ARGBMirrorRow_C(const uint8* src, uint8* dst, int width);
+void ARGBMirrorRow_Any_AVX2(const uint8* src, uint8* dst, int width);
+void ARGBMirrorRow_Any_SSE2(const uint8* src, uint8* dst, int width);
+void ARGBMirrorRow_Any_NEON(const uint8* src, uint8* dst, int width);
+
+void SplitUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix);
+void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix);
+void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix);
+void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix);
+void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int pix);
+void SplitUVRow_Any_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int pix);
+void SplitUVRow_Any_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int pix);
+void SplitUVRow_Any_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int pix);
+void SplitUVRow_Any_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int pix);
+
+void MergeUVRow_C(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width);
+void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width);
+void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width);
+void MergeUVRow_NEON(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width);
+void MergeUVRow_Any_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width);
+void MergeUVRow_Any_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width);
+void MergeUVRow_Any_NEON(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width);
+
+void CopyRow_SSE2(const uint8* src, uint8* dst, int count);
+void CopyRow_AVX(const uint8* src, uint8* dst, int count);
+void CopyRow_ERMS(const uint8* src, uint8* dst, int count);
+void CopyRow_NEON(const uint8* src, uint8* dst, int count);
+void CopyRow_MIPS(const uint8* src, uint8* dst, int count);
+void CopyRow_C(const uint8* src, uint8* dst, int count);
+void CopyRow_Any_SSE2(const uint8* src, uint8* dst, int count);
+void CopyRow_Any_AVX(const uint8* src, uint8* dst, int count);
+void CopyRow_Any_NEON(const uint8* src, uint8* dst, int count);
+
+void CopyRow_16_C(const uint16* src, uint16* dst, int count);
+
+void ARGBCopyAlphaRow_C(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBCopyAlphaRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBCopyAlphaRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width);
+
+void ARGBCopyYToAlphaRow_C(const uint8* src_y, uint8* dst_argb, int width);
+void ARGBCopyYToAlphaRow_SSE2(const uint8* src_y, uint8* dst_argb, int width);
+void ARGBCopyYToAlphaRow_AVX2(const uint8* src_y, uint8* dst_argb, int width);
+
+void SetRow_C(uint8* dst, uint8 v8, int count);
+void SetRow_X86(uint8* dst, uint8 v8, int count);
+void SetRow_ERMS(uint8* dst, uint8 v8, int count);
+void SetRow_NEON(uint8* dst, uint8 v8, int count);
+void SetRow_Any_X86(uint8* dst, uint8 v8, int count);
+void SetRow_Any_NEON(uint8* dst, uint8 v8, int count);
+
+void ARGBSetRow_C(uint8* dst_argb, uint32 v32, int count);
+void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int count);
+void ARGBSetRow_NEON(uint8* dst_argb, uint32 v32, int count);
+void ARGBSetRow_Any_NEON(uint8* dst_argb, uint32 v32, int count);
+
+// ARGBShufflers for BGRAToARGB etc.
+void ARGBShuffleRow_C(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+void ARGBShuffleRow_NEON(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+void ARGBShuffleRow_Any_SSE2(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+void ARGBShuffleRow_Any_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+void ARGBShuffleRow_Any_AVX2(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+void ARGBShuffleRow_Any_NEON(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix);
+
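+// Illustrative sketch, not part of the upstream API: the 'shuffler' argument
+// above is a table of source byte indices (16 bytes, covering a 4-pixel group
+// for the SIMD variants). The mask below (an assumption, following the usual
+// libyuv convention of little-endian B,G,R,A byte order) reverses each 4-byte
+// pixel, which is the reordering BGRAToARGB needs.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static const uint8 kExampleShuffleMaskBGRAToARGB[16] = {
+  3u, 2u, 1u, 0u, 7u, 6u, 5u, 4u, 11u, 10u, 9u, 8u, 15u, 14u, 13u, 12u
+};
+static void ExampleBGRAToARGB(const uint8* src_bgra, uint8* dst_argb,
+                              int pix) {
+  ARGBShuffleRow_C(src_bgra, dst_argb, kExampleShuffleMaskBGRAToARGB, pix);
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+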
+void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix);
+void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, int pix);
+void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb, int pix);
+void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb,
+ int pix);
+void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb,
+ int pix);
+void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb, int pix);
+void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb,
+ int pix);
+void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb,
+ int pix);
+
+void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix);
+void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix);
+void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix);
+void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb,
+ int pix);
+void ARGB4444ToARGBRow_NEON(const uint8* src_argb4444, uint8* dst_argb,
+ int pix);
+void RGB24ToARGBRow_C(const uint8* src_rgb24, uint8* dst_argb, int pix);
+void RAWToARGBRow_C(const uint8* src_raw, uint8* dst_argb, int pix);
+void RGB565ToARGBRow_C(const uint8* src_rgb565, uint8* dst_argb, int pix);
+void ARGB1555ToARGBRow_C(const uint8* src_argb1555, uint8* dst_argb, int pix);
+void ARGB4444ToARGBRow_C(const uint8* src_argb4444, uint8* dst_argb, int pix);
+void RGB24ToARGBRow_Any_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix);
+void RAWToARGBRow_Any_SSSE3(const uint8* src_raw, uint8* dst_argb, int pix);
+
+void RGB565ToARGBRow_Any_SSE2(const uint8* src_rgb565, uint8* dst_argb,
+ int pix);
+void ARGB1555ToARGBRow_Any_SSE2(const uint8* src_argb1555, uint8* dst_argb,
+ int pix);
+void ARGB4444ToARGBRow_Any_SSE2(const uint8* src_argb4444, uint8* dst_argb,
+ int pix);
+void RGB565ToARGBRow_Any_AVX2(const uint8* src_rgb565, uint8* dst_argb,
+ int pix);
+void ARGB1555ToARGBRow_Any_AVX2(const uint8* src_argb1555, uint8* dst_argb,
+ int pix);
+void ARGB4444ToARGBRow_Any_AVX2(const uint8* src_argb4444, uint8* dst_argb,
+ int pix);
+
+void RGB24ToARGBRow_Any_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix);
+void RAWToARGBRow_Any_NEON(const uint8* src_raw, uint8* dst_argb, int pix);
+void RGB565ToARGBRow_Any_NEON(const uint8* src_rgb565, uint8* dst_argb,
+ int pix);
+void ARGB1555ToARGBRow_Any_NEON(const uint8* src_argb1555, uint8* dst_argb,
+ int pix);
+void ARGB4444ToARGBRow_Any_NEON(const uint8* src_argb4444, uint8* dst_argb,
+ int pix);
+
+void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix);
+
+void ARGBToRGB565DitherRow_C(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix);
+void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix);
+void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix);
+
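+// Illustrative sketch, not part of the upstream API: 'dither4' packs four
+// per-column dither bytes (one 4x1 slice of an ordered-dither matrix) into a
+// uint32; the byte for column x is added to each channel before the 5/6/5
+// truncation, trading banding for noise.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static uint8 ExampleDitherForColumn(uint32 dither4, int x) {
+  return (uint8)(dither4 >> ((x & 3) * 8));  // byte 0 for x % 4 == 0, etc.
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+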
+void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix);
+
+void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRGB565DitherRow_NEON(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int width);
+
+void ARGBToRGBARow_C(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRGB24Row_C(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRAWRow_C(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRGB565Row_C(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB1555Row_C(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB4444Row_C(const uint8* src_argb, uint8* dst_rgb, int pix);
+
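+// Illustrative sketch, not part of the upstream API: the 16-bit packers
+// truncate the 8-bit channels. Assuming libyuv's little-endian ARGB layout
+// (bytes B, G, R, A), one RGB565 pixel packs as:
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static uint16 ExamplePackRGB565(const uint8* argb) {
+  uint16 b = argb[0] >> 3;  // keep 5 bits of blue
+  uint16 g = argb[1] >> 2;  // keep 6 bits of green
+  uint16 r = argb[2] >> 3;  // keep 5 bits of red
+  return (uint16)(b | (g << 5) | (r << 11));
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+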
+void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix);
+void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int pix);
+void J400ToARGBRow_NEON(const uint8* src_y, uint8* dst_argb, int pix);
+void J400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int pix);
+void J400ToARGBRow_Any_SSE2(const uint8* src_y, uint8* dst_argb, int pix);
+void J400ToARGBRow_Any_AVX2(const uint8* src_y, uint8* dst_argb, int pix);
+void J400ToARGBRow_Any_NEON(const uint8* src_y, uint8* dst_argb, int pix);
+
+void I444ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I411ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void NV12ToARGBRow_C(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToRGB565Row_C(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void NV12ToRGB565Row_C(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToARGBRow_C(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void YUY2ToARGBRow_C(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width);
+void UYVYToARGBRow_C(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width);
+void J422ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToBGRARow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_bgra,
+ int width);
+void I422ToABGRRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_abgr,
+ int width);
+void I422ToRGBARow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgba,
+ int width);
+void I422ToRGB24Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb24,
+ int width);
+void I422ToRAWRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_raw,
+ int width);
+void I422ToARGB4444Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb4444,
+ int width);
+void I422ToARGB1555Row_C(const uint8* src_y,
+                         const uint8* src_u,
+                         const uint8* src_v,
+                         uint8* dst_argb1555,
+                         int width);
+void I422ToRGB565Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb565,
+ int width);
+void I422ToARGBRow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToBGRARow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGBARow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToABGRRow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I444ToARGBRow_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I444ToARGBRow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGBRow_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I411ToARGBRow_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I411ToARGBRow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void NV12ToARGBRow_SSSE3(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToARGBRow_SSSE3(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void NV12ToARGBRow_AVX2(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToARGBRow_AVX2(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void NV12ToRGB565Row_SSSE3(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToRGB565Row_SSSE3(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void NV12ToRGB565Row_AVX2(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToRGB565Row_AVX2(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void YUY2ToARGBRow_SSSE3(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width);
+void UYVYToARGBRow_SSSE3(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width);
+void YUY2ToARGBRow_AVX2(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width);
+void UYVYToARGBRow_AVX2(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width);
+void J422ToARGBRow_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void J422ToARGBRow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToBGRARow_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_bgra,
+ int width);
+void I422ToABGRRow_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_abgr,
+ int width);
+void I422ToRGBARow_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgba,
+ int width);
+void I422ToARGB4444Row_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGB4444Row_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGB1555Row_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGB1555Row_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGB565Row_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGB565Row_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGB24Row_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb24,
+ int width);
+void I422ToRGB24Row_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb24,
+ int width);
+void I422ToRAWRow_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_raw,
+ int width);
+void I422ToRAWRow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_raw,
+ int width);
+void I422ToARGBRow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToBGRARow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGBARow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToABGRRow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I444ToARGBRow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I444ToARGBRow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGBRow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I411ToARGBRow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I411ToARGBRow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void NV12ToARGBRow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToARGBRow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void NV12ToARGBRow_Any_AVX2(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToARGBRow_Any_AVX2(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void NV12ToRGB565Row_Any_SSSE3(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToRGB565Row_Any_SSSE3(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void NV12ToRGB565Row_Any_AVX2(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToRGB565Row_Any_AVX2(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_argb,
+ int width);
+void YUY2ToARGBRow_Any_SSSE3(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width);
+void UYVYToARGBRow_Any_SSSE3(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width);
+void YUY2ToARGBRow_Any_AVX2(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width);
+void UYVYToARGBRow_Any_AVX2(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width);
+void J422ToARGBRow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void J422ToARGBRow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToBGRARow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_bgra,
+ int width);
+void I422ToABGRRow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_abgr,
+ int width);
+void I422ToRGBARow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgba,
+ int width);
+void I422ToARGB4444Row_Any_SSSE3(const uint8* src_y,
+                                 const uint8* src_u,
+                                 const uint8* src_v,
+                                 uint8* dst_argb,
+                                 int width);
+void I422ToARGB4444Row_Any_AVX2(const uint8* src_y,
+                                const uint8* src_u,
+                                const uint8* src_v,
+                                uint8* dst_argb,
+                                int width);
+void I422ToARGB1555Row_Any_SSSE3(const uint8* src_y,
+                                 const uint8* src_u,
+                                 const uint8* src_v,
+                                 uint8* dst_argb,
+                                 int width);
+void I422ToARGB1555Row_Any_AVX2(const uint8* src_y,
+                                const uint8* src_u,
+                                const uint8* src_v,
+                                uint8* dst_argb,
+                                int width);
+void I422ToRGB565Row_Any_SSSE3(const uint8* src_y,
+                               const uint8* src_u,
+                               const uint8* src_v,
+                               uint8* dst_argb,
+                               int width);
+void I422ToRGB565Row_Any_AVX2(const uint8* src_y,
+                              const uint8* src_u,
+                              const uint8* src_v,
+                              uint8* dst_argb,
+                              int width);
+void I422ToRGB24Row_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGB24Row_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRAWRow_Any_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRAWRow_Any_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+
+void I400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width);
+void I400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int width);
+void I400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int width);
+void I400ToARGBRow_NEON(const uint8* src_y, uint8* dst_argb, int width);
+void I400ToARGBRow_Any_SSE2(const uint8* src_y, uint8* dst_argb, int width);
+void I400ToARGBRow_Any_AVX2(const uint8* src_y, uint8* dst_argb, int width);
+void I400ToARGBRow_Any_NEON(const uint8* src_y, uint8* dst_argb, int width);
+
+// ARGB preattenuated (premultiplied) alpha blend.
+void ARGBBlendRow_SSSE3(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBBlendRow_SSE2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBBlendRow_NEON(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBBlendRow_C(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+
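+// Illustrative sketch, not part of the upstream API: with premultiplied
+// sources, each color channel blends as dst = fg + bg * (256 - fg_alpha) /
+// 256, so no per-channel multiply by the foreground alpha is needed. Assumes
+// properly premultiplied inputs; the opaque destination alpha is also an
+// assumption of this sketch.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static void ExampleBlendPixel(const uint8* fg, const uint8* bg, uint8* dst) {
+  uint32 a = fg[3];
+  int i;
+  for (i = 0; i < 3; ++i) {
+    dst[i] = (uint8)(fg[i] + ((bg[i] * (256 - a)) >> 8));
+  }
+  dst[3] = 255;
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+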
+// ARGB multiply images. Same API as Blend, but these require
+// pointer and width alignment for SSE2.
+void ARGBMultiplyRow_C(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBMultiplyRow_SSE2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBMultiplyRow_Any_SSE2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBMultiplyRow_AVX2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBMultiplyRow_Any_AVX2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBMultiplyRow_NEON(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBMultiplyRow_Any_NEON(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+
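+// Illustrative sketch, not part of the upstream API: per channel the multiply
+// is dst = src0 * src1 / 255. Widening one operand to 16 bits (v | v << 8,
+// i.e. v * 257) and shifting right by 16 approximates the /255 cheaply, which
+// is what keeps the SIMD variants fast; exact rounding is an implementation
+// detail of each variant.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static void ExampleMultiplyPixel(const uint8* s0, const uint8* s1,
+                                 uint8* dst) {
+  int i;
+  for (i = 0; i < 4; ++i) {
+    dst[i] = (uint8)((s0[i] * (s1[i] | (s1[i] << 8))) >> 16);  // ~x * y / 255
+  }
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+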
+// ARGB add images.
+void ARGBAddRow_C(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBAddRow_SSE2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBAddRow_Any_SSE2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBAddRow_AVX2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBAddRow_Any_AVX2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBAddRow_NEON(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBAddRow_Any_NEON(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+
+// ARGB subtract images. Same API as Blend, but these require
+// pointer and width alignment for SSE2.
+void ARGBSubtractRow_C(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBSubtractRow_SSE2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBSubtractRow_Any_SSE2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBSubtractRow_AVX2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBSubtractRow_Any_AVX2(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBSubtractRow_NEON(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+void ARGBSubtractRow_Any_NEON(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width);
+
+void ARGBToRGB24Row_Any_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRAWRow_Any_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRGB565Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB1555Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB4444Row_Any_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix);
+
+void ARGBToRGB565DitherRow_Any_SSE2(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix);
+void ARGBToRGB565DitherRow_Any_AVX2(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix);
+
+void ARGBToRGB565Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB1555Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB4444Row_Any_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix);
+
+void ARGBToRGB24Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRAWRow_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRGB565Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB1555Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToARGB4444Row_Any_NEON(const uint8* src_argb, uint8* dst_rgb, int pix);
+void ARGBToRGB565DitherRow_Any_NEON(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int width);
+
+void I444ToARGBRow_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGBRow_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I411ToARGBRow_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToBGRARow_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToABGRRow_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGBARow_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGB24Row_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRAWRow_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGB4444Row_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToARGB1555Row_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToRGB565Row_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void NV12ToARGBRow_Any_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToARGBRow_Any_NEON(const uint8* src_y,
+                            const uint8* src_vu,
+                            uint8* dst_argb,
+                            int width);
+void NV12ToRGB565Row_Any_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width);
+void NV21ToRGB565Row_Any_NEON(const uint8* src_y,
+                              const uint8* src_vu,
+                              uint8* dst_argb,
+                              int width);
+void YUY2ToARGBRow_Any_NEON(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width);
+void UYVYToARGBRow_Any_NEON(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width);
+void I422ToARGBRow_MIPS_DSPR2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToBGRARow_MIPS_DSPR2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+void I422ToABGRRow_MIPS_DSPR2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width);
+
+void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix);
+void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix);
+void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int pix);
+void YUY2ToUVRow_NEON(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToUV422Row_NEON(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToYRow_C(const uint8* src_yuy2, uint8* dst_y, int pix);
+void YUY2ToUVRow_C(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToUV422Row_C(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToYRow_Any_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix);
+void YUY2ToUVRow_Any_AVX2(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToUV422Row_Any_AVX2(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToYRow_Any_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix);
+void YUY2ToUVRow_Any_SSE2(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToUV422Row_Any_SSE2(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToYRow_Any_NEON(const uint8* src_yuy2, uint8* dst_y, int pix);
+void YUY2ToUVRow_Any_NEON(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void YUY2ToUV422Row_Any_NEON(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int pix);
+void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int pix);
+void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int pix);
+void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToUV422Row_NEON(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+
+void UYVYToYRow_C(const uint8* src_uyvy, uint8* dst_y, int pix);
+void UYVYToUVRow_C(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToUV422Row_C(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToYRow_Any_AVX2(const uint8* src_uyvy, uint8* dst_y, int pix);
+void UYVYToUVRow_Any_AVX2(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToUV422Row_Any_AVX2(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToYRow_Any_SSE2(const uint8* src_uyvy, uint8* dst_y, int pix);
+void UYVYToUVRow_Any_SSE2(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToUV422Row_Any_SSE2(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToYRow_Any_NEON(const uint8* src_uyvy, uint8* dst_y, int pix);
+void UYVYToUVRow_Any_NEON(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+void UYVYToUV422Row_Any_NEON(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix);
+
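+// Illustrative sketch, not part of the upstream API: YUY2 stores each 2-pixel
+// group as the bytes Y0 U Y1 V, and UYVY as U Y0 V Y1, so the luma extractors
+// above differ only in the starting byte offset.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static void ExampleYUY2ToY(const uint8* src_yuy2, uint8* dst_y, int pix) {
+  int x;
+  for (x = 0; x < pix; ++x) {
+    dst_y[x] = src_yuy2[x * 2];  // UYVY would read src_yuy2[x * 2 + 1]
+  }
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+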
+void I422ToYUY2Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_yuy2, int width);
+void I422ToUYVYRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_uyvy, int width);
+void I422ToYUY2Row_SSE2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_yuy2, int width);
+void I422ToUYVYRow_SSE2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_uyvy, int width);
+void I422ToYUY2Row_Any_SSE2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_yuy2, int width);
+void I422ToUYVYRow_Any_SSE2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_uyvy, int width);
+void I422ToYUY2Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_yuy2, int width);
+void I422ToUYVYRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_uyvy, int width);
+void I422ToYUY2Row_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_yuy2, int width);
+void I422ToUYVYRow_Any_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_uyvy, int width);
+
+// Effects-related row functions.
+void ARGBAttenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBAttenuateRow_NEON(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBAttenuateRow_Any_SSE2(const uint8* src_argb, uint8* dst_argb,
+ int width);
+void ARGBAttenuateRow_Any_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ int width);
+void ARGBAttenuateRow_Any_AVX2(const uint8* src_argb, uint8* dst_argb,
+ int width);
+void ARGBAttenuateRow_Any_NEON(const uint8* src_argb, uint8* dst_argb,
+ int width);
+
+// Inverse table for unattenuate, shared by C and SSE2.
+extern const uint32 fixed_invtbl8[256];
+void ARGBUnattenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBUnattenuateRow_Any_SSE2(const uint8* src_argb, uint8* dst_argb,
+ int width);
+void ARGBUnattenuateRow_Any_AVX2(const uint8* src_argb, uint8* dst_argb,
+ int width);
+
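+// Illustrative sketch, not part of the upstream API: unattenuate divides each
+// color channel by alpha using the reciprocal table above instead of a real
+// divide. The assumption here is that the low 16 bits of each entry hold
+// roughly 65536 / a, i.e. an 8.8 fixed-point reciprocal.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static void ExampleUnattenuatePixel(const uint8* src, uint8* dst) {
+  uint32 a = src[3];
+  uint32 ia = fixed_invtbl8[a] & 0xffff;  // ~(65536 / a)
+  int i;
+  for (i = 0; i < 3; ++i) {
+    uint32 v = (src[i] * ia) >> 8;
+    dst[i] = (uint8)(v > 255 ? 255 : v);  // clamp; the reciprocal rounds up
+  }
+  dst[3] = src[3];
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+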
+void ARGBGrayRow_C(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width);
+void ARGBGrayRow_NEON(const uint8* src_argb, uint8* dst_argb, int width);
+
+void ARGBSepiaRow_C(uint8* dst_argb, int width);
+void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width);
+void ARGBSepiaRow_NEON(uint8* dst_argb, int width);
+
+void ARGBColorMatrixRow_C(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width);
+void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width);
+void ARGBColorMatrixRow_NEON(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width);
+
+void ARGBColorTableRow_C(uint8* dst_argb, const uint8* table_argb, int width);
+void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width);
+
+void RGBColorTableRow_C(uint8* dst_argb, const uint8* table_argb, int width);
+void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width);
+
+void ARGBQuantizeRow_C(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width);
+void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width);
+void ARGBQuantizeRow_NEON(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width);
+
+void ARGBShadeRow_C(const uint8* src_argb, uint8* dst_argb, int width,
+ uint32 value);
+void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
+ uint32 value);
+void ARGBShadeRow_NEON(const uint8* src_argb, uint8* dst_argb, int width,
+ uint32 value);
+
+// Used for blur.
+void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
+ int width, int area, uint8* dst, int count);
+void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
+ const int32* previous_cumsum, int width);
+
+void CumulativeSumToAverageRow_C(const int32* topleft, const int32* botleft,
+ int width, int area, uint8* dst, int count);
+void ComputeCumulativeSumRow_C(const uint8* row, int32* cumsum,
+ const int32* previous_cumsum, int width);
+
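+// Illustrative sketch, not part of the upstream API: the blur averages a box
+// of pixels using a summed-area table, so each output needs only the four
+// corner sums regardless of box size. A single channel is shown; the real
+// rows track four interleaved channels.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static uint8 ExampleBoxAverage(int32 topleft, int32 topright,
+                               int32 botleft, int32 botright, int area) {
+  int32 sum = botright - botleft - topright + topleft;
+  return (uint8)(sum / area);
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+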
+LIBYUV_API
+void ARGBAffineRow_C(const uint8* src_argb, int src_argb_stride,
+ uint8* dst_argb, const float* uv_dudv, int width);
+LIBYUV_API
+void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
+ uint8* dst_argb, const float* uv_dudv, int width);
+
+// Used for I420Scale, ARGBScale, and ARGBInterpolate.
+void InterpolateRow_C(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr,
+ int width, int source_y_fraction);
+void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_NEON(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_Any_NEON(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_Any_SSE2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_Any_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_Any_AVX2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+void InterpolateRow_Any_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride_ptr, int width,
+ int source_y_fraction);
+
+void InterpolateRow_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ ptrdiff_t src_stride_ptr,
+ int width, int source_y_fraction);
+
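+// Illustrative sketch, not part of the upstream API: source_y_fraction is a
+// 0..255 blend weight between a source row and the row one stride below it
+// (0 selects the first row entirely).
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static void ExampleInterpolateRow(uint8* dst_ptr, const uint8* src_ptr,
+                                  ptrdiff_t src_stride_ptr, int width,
+                                  int source_y_fraction) {
+  const uint8* src1 = src_ptr + src_stride_ptr;
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst_ptr[x] = (uint8)((src_ptr[x] * (256 - source_y_fraction) +
+                          src1[x] * source_y_fraction) >> 8);
+  }
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+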
+// Sobel images.
+void SobelXRow_C(const uint8* src_y0, const uint8* src_y1, const uint8* src_y2,
+ uint8* dst_sobelx, int width);
+void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
+ const uint8* src_y2, uint8* dst_sobelx, int width);
+void SobelXRow_NEON(const uint8* src_y0, const uint8* src_y1,
+ const uint8* src_y2, uint8* dst_sobelx, int width);
+void SobelYRow_C(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width);
+void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width);
+void SobelYRow_NEON(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width);
+void SobelRow_C(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelToPlaneRow_C(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width);
+void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width);
+void SobelToPlaneRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width);
+void SobelXYRow_C(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelXYRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelRow_Any_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelRow_Any_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelToPlaneRow_Any_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width);
+void SobelToPlaneRow_Any_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width);
+void SobelXYRow_Any_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+void SobelXYRow_Any_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width);
+
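+// Illustrative sketch, not part of the upstream API: SobelRow combines the
+// precomputed |gx| and |gy| magnitude planes into a gray ARGB pixel by
+// saturating their sum; SobelToPlaneRow writes the same sum as a single byte.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static void ExampleSobelCombine(const uint8* src_sobelx,
+                                const uint8* src_sobely,
+                                uint8* dst_argb, int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    int s = src_sobelx[x] + src_sobely[x];
+    uint8 g = (uint8)(s > 255 ? 255 : s);
+    dst_argb[x * 4 + 0] = g;    // B
+    dst_argb[x * 4 + 1] = g;    // G
+    dst_argb[x * 4 + 2] = g;    // R
+    dst_argb[x * 4 + 3] = 255;  // A
+  }
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+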
+void ARGBPolynomialRow_C(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width);
+void ARGBPolynomialRow_SSE2(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width);
+void ARGBPolynomialRow_AVX2(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width);
+
+void ARGBLumaColorTableRow_C(const uint8* src_argb, uint8* dst_argb, int width,
+ const uint8* luma, uint32 lumacoeff);
+void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ int width,
+ const uint8* luma, uint32 lumacoeff);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_ROW_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/scale.h b/media/libaom/src/third_party/libyuv/include/libyuv/scale.h
new file mode 100644
index 000000000..3974aba34
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/scale.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_SCALE_H_ // NOLINT
+#define INCLUDE_LIBYUV_SCALE_H_
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Supported filtering.
+typedef enum FilterMode {
+ kFilterNone = 0, // Point sample; Fastest.
+ kFilterLinear = 1, // Filter horizontally only.
+ kFilterBilinear = 2, // Faster than box, but lower quality scaling down.
+ kFilterBox = 3 // Highest quality.
+} FilterModeEnum;
+
+// Scale a YUV plane.
+LIBYUV_API
+void ScalePlane(const uint8* src, int src_stride,
+ int src_width, int src_height,
+ uint8* dst, int dst_stride,
+ int dst_width, int dst_height,
+ enum FilterMode filtering);
+
+LIBYUV_API
+void ScalePlane_16(const uint16* src, int src_stride,
+ int src_width, int src_height,
+ uint16* dst, int dst_stride,
+ int dst_width, int dst_height,
+ enum FilterMode filtering);
+
+// Scales a YUV 4:2:0 image from the src width and height to the
+// dst width and height.
+// If filtering is kFilterNone, a simple nearest-neighbor algorithm is
+// used. This produces basic (blocky) quality at the fastest speed.
+// If filtering is kFilterBilinear, interpolation is used to produce a better
+// quality image, at the expense of speed.
+// If filtering is kFilterBox, averaging is used to produce an even better
+// quality image, at further expense of speed.
+// Returns 0 if successful.
+
+LIBYUV_API
+int I420Scale(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ int src_width, int src_height,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int dst_width, int dst_height,
+ enum FilterMode filtering);
+
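+// Illustrative usage sketch, not part of the upstream API; the frame size and
+// the standard I420 plane layout (Y, then U, then V, chroma at half
+// resolution) are assumptions of this example.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static int ExampleHalveI420(const uint8* src, uint8* dst) {
+  // 640x360 -> 320x180 with bilinear filtering.
+  return I420Scale(src, 640,                          // Y plane, stride 640
+                   src + 640 * 360, 320,              // U plane, stride 320
+                   src + 640 * 360 + 320 * 180, 320,  // V plane, stride 320
+                   640, 360,
+                   dst, 320,
+                   dst + 320 * 180, 160,
+                   dst + 320 * 180 + 160 * 90, 160,
+                   320, 180, kFilterBilinear);
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+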
+LIBYUV_API
+int I420Scale_16(const uint16* src_y, int src_stride_y,
+ const uint16* src_u, int src_stride_u,
+ const uint16* src_v, int src_stride_v,
+ int src_width, int src_height,
+ uint16* dst_y, int dst_stride_y,
+ uint16* dst_u, int dst_stride_u,
+ uint16* dst_v, int dst_stride_v,
+ int dst_width, int dst_height,
+ enum FilterMode filtering);
+
+#ifdef __cplusplus
+// Legacy API. Deprecated.
+LIBYUV_API
+int Scale(const uint8* src_y, const uint8* src_u, const uint8* src_v,
+ int src_stride_y, int src_stride_u, int src_stride_v,
+ int src_width, int src_height,
+ uint8* dst_y, uint8* dst_u, uint8* dst_v,
+ int dst_stride_y, int dst_stride_u, int dst_stride_v,
+ int dst_width, int dst_height,
+ LIBYUV_BOOL interpolate);
+
+// Legacy API. Deprecated.
+LIBYUV_API
+int ScaleOffset(const uint8* src_i420, int src_width, int src_height,
+ uint8* dst_i420, int dst_width, int dst_height, int dst_yoffset,
+ LIBYUV_BOOL interpolate);
+
+// For testing, allow disabling of specialized scalers.
+LIBYUV_API
+void SetUseReferenceImpl(LIBYUV_BOOL use);
+#endif // __cplusplus
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_SCALE_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/scale_argb.h b/media/libaom/src/third_party/libyuv/include/libyuv/scale_argb.h
new file mode 100644
index 000000000..22563837d
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/scale_argb.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_SCALE_ARGB_H_ // NOLINT
+#define INCLUDE_LIBYUV_SCALE_ARGB_H_
+
+#include "libyuv/basic_types.h"
+#include "libyuv/scale.h" // For FilterMode
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+LIBYUV_API
+int ARGBScale(const uint8* src_argb, int src_stride_argb,
+ int src_width, int src_height,
+ uint8* dst_argb, int dst_stride_argb,
+ int dst_width, int dst_height,
+ enum FilterMode filtering);
+
+// Clipped scale takes destination rectangle coordinates for clip values.
+LIBYUV_API
+int ARGBScaleClip(const uint8* src_argb, int src_stride_argb,
+ int src_width, int src_height,
+ uint8* dst_argb, int dst_stride_argb,
+ int dst_width, int dst_height,
+ int clip_x, int clip_y, int clip_width, int clip_height,
+ enum FilterMode filtering);
+
+// TODO(fbarchard): Implement this.
+// Scale with YUV conversion to ARGB and clipping.
+LIBYUV_API
+int YUVToARGBScaleClip(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint32 src_fourcc,
+ int src_width, int src_height,
+ uint8* dst_argb, int dst_stride_argb,
+ uint32 dst_fourcc,
+ int dst_width, int dst_height,
+ int clip_x, int clip_y, int clip_width, int clip_height,
+ enum FilterMode filtering);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_SCALE_ARGB_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/scale_row.h b/media/libaom/src/third_party/libyuv/include/libyuv/scale_row.h
new file mode 100644
index 000000000..a46b5ce69
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/scale_row.h
@@ -0,0 +1,479 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_SCALE_ROW_H_ // NOLINT
+#define INCLUDE_LIBYUV_SCALE_ROW_H_
+
+#include "libyuv/basic_types.h"
+#include "libyuv/scale.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#if defined(__pnacl__) || defined(__CLR_VER) || \
+ (defined(__i386__) && !defined(__SSE2__))
+#define LIBYUV_DISABLE_X86
+#endif
+
+// Visual C 2012 required for AVX2.
+#if defined(_M_IX86) && !defined(__clang__) && \
+ defined(_MSC_VER) && _MSC_VER >= 1700
+#define VISUALC_HAS_AVX2 1
+#endif // VisualStudio >= 2012
+
+// The following are available on all x86 platforms:
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
+#define HAS_FIXEDDIV1_X86
+#define HAS_FIXEDDIV_X86
+#define HAS_SCALEARGBCOLS_SSE2
+#define HAS_SCALEARGBCOLSUP2_SSE2
+#define HAS_SCALEARGBFILTERCOLS_SSSE3
+#define HAS_SCALEARGBROWDOWN2_SSE2
+#define HAS_SCALEARGBROWDOWNEVEN_SSE2
+#define HAS_SCALECOLSUP2_SSE2
+#define HAS_SCALEFILTERCOLS_SSSE3
+#define HAS_SCALEROWDOWN2_SSE2
+#define HAS_SCALEROWDOWN34_SSSE3
+#define HAS_SCALEROWDOWN38_SSSE3
+#define HAS_SCALEROWDOWN4_SSE2
+#endif
+
+// The following are available on VS2012:
+#if !defined(LIBYUV_DISABLE_X86) && defined(VISUALC_HAS_AVX2)
+#define HAS_SCALEADDROW_AVX2
+#define HAS_SCALEROWDOWN2_AVX2
+#define HAS_SCALEROWDOWN4_AVX2
+#endif
+
+// The following are available on Visual C:
+#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && !defined(__clang__)
+#define HAS_SCALEADDROW_SSE2
+#endif
+
+// The following are available on Neon platforms:
+#if !defined(LIBYUV_DISABLE_NEON) && !defined(__native_client__) && \
+ (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
+#define HAS_SCALEARGBCOLS_NEON
+#define HAS_SCALEARGBROWDOWN2_NEON
+#define HAS_SCALEARGBROWDOWNEVEN_NEON
+#define HAS_SCALEFILTERCOLS_NEON
+#define HAS_SCALEROWDOWN2_NEON
+#define HAS_SCALEROWDOWN34_NEON
+#define HAS_SCALEROWDOWN38_NEON
+#define HAS_SCALEROWDOWN4_NEON
+#define HAS_SCALEARGBFILTERCOLS_NEON
+#endif
+
+// The following are available on Mips platforms:
+#if !defined(LIBYUV_DISABLE_MIPS) && !defined(__native_client__) && \
+ defined(__mips__) && defined(__mips_dsp) && (__mips_dsp_rev >= 2)
+#define HAS_SCALEROWDOWN2_MIPS_DSPR2
+#define HAS_SCALEROWDOWN4_MIPS_DSPR2
+#define HAS_SCALEROWDOWN34_MIPS_DSPR2
+#define HAS_SCALEROWDOWN38_MIPS_DSPR2
+#endif
+
+// Scale ARGB vertically with bilinear interpolation.
+void ScalePlaneVertical(int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_argb, uint8* dst_argb,
+ int x, int y, int dy,
+ int bpp, enum FilterMode filtering);
+
+void ScalePlaneVertical_16(int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_argb, uint16* dst_argb,
+ int x, int y, int dy,
+ int wpp, enum FilterMode filtering);
+
+// Simplify the filtering based on scale factors.
+enum FilterMode ScaleFilterReduce(int src_width, int src_height,
+ int dst_width, int dst_height,
+ enum FilterMode filtering);
+
+// Divide num by div and return as 16.16 fixed point result.
+int FixedDiv_C(int num, int div);
+int FixedDiv_X86(int num, int div);
+// Divide num - 1 by div - 1 and return as 16.16 fixed point result.
+int FixedDiv1_C(int num, int div);
+int FixedDiv1_X86(int num, int div);
+#ifdef HAS_FIXEDDIV_X86
+#define FixedDiv FixedDiv_X86
+#define FixedDiv1 FixedDiv1_X86
+#else
+#define FixedDiv FixedDiv_C
+#define FixedDiv1 FixedDiv1_C
+#endif
+
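+// Illustrative sketch, not part of the upstream API: the portable fallback
+// widens to 64 bits, shifts into 16.16 fixed point, then divides.
+#if defined(LIBYUV_ILLUSTRATIVE_EXAMPLES)  // hypothetical guard, examples only
+static int ExampleFixedDiv(int num, int div) {
+  return (int)(((int64)(num) << 16) / div);  // e.g. 1, 2 -> 0x8000 (0.5)
+}
+#endif  // LIBYUV_ILLUSTRATIVE_EXAMPLES
+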
+// Compute slope values for stepping.
+void ScaleSlope(int src_width, int src_height,
+ int dst_width, int dst_height,
+ enum FilterMode filtering,
+ int* x, int* y, int* dx, int* dy);
+
+void ScaleRowDown2_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width);
+void ScaleRowDown2Linear_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2Linear_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width);
+void ScaleRowDown2Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width);
+void ScaleRowDown4_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown4_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width);
+void ScaleRowDown4Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown4Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width);
+void ScaleRowDown34_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown34_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width);
+void ScaleRowDown34_0_Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width);
+void ScaleRowDown34_0_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* d, int dst_width);
+void ScaleRowDown34_1_Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width);
+void ScaleRowDown34_1_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* d, int dst_width);
+void ScaleCols_C(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx);
+void ScaleCols_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx);
+void ScaleColsUp2_C(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int, int);
+void ScaleColsUp2_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int, int);
+void ScaleFilterCols_C(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx);
+void ScaleFilterCols_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx);
+void ScaleFilterCols64_C(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx);
+void ScaleFilterCols64_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx);
+void ScaleRowDown38_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown38_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width);
+void ScaleRowDown38_3_Box_C(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_3_Box_16_C(const uint16* src_ptr,
+ ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width);
+void ScaleRowDown38_2_Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_2_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width);
+void ScaleAddRow_C(const uint8* src_ptr, uint16* dst_ptr, int src_width);
+void ScaleAddRow_16_C(const uint16* src_ptr, uint32* dst_ptr, int src_width);
+void ScaleARGBRowDown2_C(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2Linear_C(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2Box_C(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEven_C(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEvenBox_C(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBCols_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBCols64_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBColsUp2_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int, int);
+void ScaleARGBFilterCols_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBFilterCols64_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+
+// Specialized scalers for x86.
+void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2Linear_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+
+void ScaleRowDown34_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_1_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_0_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_3_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_2_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2Linear_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2Box_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2Linear_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown2Box_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4Box_Any_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4Box_Any_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+
+void ScaleRowDown34_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_1_Box_Any_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_0_Box_Any_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_Any_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_3_Box_Any_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_2_Box_Any_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+
+void ScaleAddRow_SSE2(const uint8* src_ptr, uint16* dst_ptr, int src_width);
+void ScaleAddRow_AVX2(const uint8* src_ptr, uint16* dst_ptr, int src_width);
+void ScaleAddRow_Any_SSE2(const uint8* src_ptr, uint16* dst_ptr, int src_width);
+void ScaleAddRow_Any_AVX2(const uint8* src_ptr, uint16* dst_ptr, int src_width);
+
+void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx);
+void ScaleColsUp2_SSE2(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx);
+
+
+// ARGB Column functions
+void ScaleARGBCols_SSE2(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBFilterCols_SSSE3(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBColsUp2_SSE2(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBFilterCols_NEON(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBCols_NEON(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBFilterCols_Any_NEON(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+void ScaleARGBCols_Any_NEON(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx);
+
+// ARGB Row functions
+void ScaleARGBRowDown2_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2Linear_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2Box_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleARGBRowDown2Linear_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleARGBRowDown2_Any_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2Linear_Any_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2Box_Any_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleARGBRowDown2Linear_Any_NEON(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDown2Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+
+void ScaleARGBRowDownEven_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx, uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEvenBox_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEven_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEven_Any_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEvenBox_Any_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEven_Any_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+void ScaleARGBRowDownEvenBox_Any_NEON(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width);
+
+// ScaleRowDown2Box is also used by planar functions.
+// NEON downscalers with interpolation.
+
+// Note: not static, due to reuse in convert for 444 to 420.
+void ScaleRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2Linear_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+
+void ScaleRowDown4_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+
+// Down scale from 4 to 3 pixels. Uses the NEON multilane read/write to load
+// every 4th pixel into 4 different registers.
+// Point samples 32 pixels to 24 pixels.
+void ScaleRowDown34_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_0_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_1_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+
+// 32 -> 12
+void ScaleRowDown38_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+// 32x3 -> 12x1
+void ScaleRowDown38_3_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+// 32x2 -> 12x1
+void ScaleRowDown38_2_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+
+void ScaleRowDown2_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2Linear_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown4_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown4Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_0_Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown34_1_Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+// 32 -> 12
+void ScaleRowDown38_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+// 32x3 -> 12x1
+void ScaleRowDown38_3_Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+// 32x2 -> 12x1
+void ScaleRowDown38_2_Box_Any_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+
+void ScaleAddRow_NEON(const uint8* src_ptr, uint16* dst_ptr, int src_width);
+void ScaleAddRow_Any_NEON(const uint8* src_ptr, uint16* dst_ptr, int src_width);
+
+void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx);
+
+void ScaleFilterCols_Any_NEON(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx);
+
+
+void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width);
+void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width);
+void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width);
+void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+void ScaleRowDown38_3_Box_MIPS_DSPR2(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_SCALE_ROW_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/version.h b/media/libaom/src/third_party/libyuv/include/libyuv/version.h
new file mode 100644
index 000000000..287b98ebf
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/version.h
@@ -0,0 +1,17 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef INCLUDE_LIBYUV_VERSION_H_ // NOLINT
+#define INCLUDE_LIBYUV_VERSION_H_
+
+#define LIBYUV_VERSION 1456
+
+#endif // INCLUDE_LIBYUV_VERSION_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/include/libyuv/video_common.h b/media/libaom/src/third_party/libyuv/include/libyuv/video_common.h
new file mode 100644
index 000000000..7b0a19cc9
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/include/libyuv/video_common.h
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+// Common definitions for video, including fourcc and VideoFormat.
+
+#ifndef INCLUDE_LIBYUV_VIDEO_COMMON_H_ // NOLINT
+#define INCLUDE_LIBYUV_VIDEO_COMMON_H_
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+//////////////////////////////////////////////////////////////////////////////
+// Definition of FourCC codes
+//////////////////////////////////////////////////////////////////////////////
+
+// Convert four characters to a FourCC code.
+// Needs to be a macro; otherwise the OS X compiler complains when the
+// kFormat* constants are used in a switch.
+#ifdef __cplusplus
+#define FOURCC(a, b, c, d) ( \
+ (static_cast<uint32>(a)) | (static_cast<uint32>(b) << 8) | \
+ (static_cast<uint32>(c) << 16) | (static_cast<uint32>(d) << 24))
+#else
+#define FOURCC(a, b, c, d) ( \
+ ((uint32)(a)) | ((uint32)(b) << 8) | /* NOLINT */ \
+ ((uint32)(c) << 16) | ((uint32)(d) << 24)) /* NOLINT */
+#endif
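+// For example, FOURCC('I', '4', '2', '0') evaluates to 0x30323449: the first
+// character lands in the least significant byte.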
+
+// Some pages discussing FourCC codes:
+// http://www.fourcc.org/yuv.php
+// http://v4l2spec.bytesex.org/spec/book1.htm
+// http://developer.apple.com/quicktime/icefloe/dispatch020.html
+// http://msdn.microsoft.com/library/windows/desktop/dd206750.aspx#nv12
+// http://people.xiph.org/~xiphmont/containers/nut/nut4cc.txt
+
+// FourCC codes grouped according to implementation efficiency.
+// Primary formats should convert in 1 efficient step.
+// Secondary formats are converted in 2 steps.
+// Auxiliary formats call primary converters.
+enum FourCC {
+ // 9 Primary YUV formats: 5 planar, 2 biplanar, 2 packed.
+ FOURCC_I420 = FOURCC('I', '4', '2', '0'),
+ FOURCC_I422 = FOURCC('I', '4', '2', '2'),
+ FOURCC_I444 = FOURCC('I', '4', '4', '4'),
+ FOURCC_I411 = FOURCC('I', '4', '1', '1'),
+ FOURCC_I400 = FOURCC('I', '4', '0', '0'),
+ FOURCC_NV21 = FOURCC('N', 'V', '2', '1'),
+ FOURCC_NV12 = FOURCC('N', 'V', '1', '2'),
+ FOURCC_YUY2 = FOURCC('Y', 'U', 'Y', '2'),
+ FOURCC_UYVY = FOURCC('U', 'Y', 'V', 'Y'),
+
+ // 2 Secondary YUV formats: row biplanar.
+ FOURCC_M420 = FOURCC('M', '4', '2', '0'),
+ FOURCC_Q420 = FOURCC('Q', '4', '2', '0'), // deprecated.
+
+ // 9 Primary RGB formats: 4 32 bpp, 2 24 bpp, 3 16 bpp.
+ FOURCC_ARGB = FOURCC('A', 'R', 'G', 'B'),
+ FOURCC_BGRA = FOURCC('B', 'G', 'R', 'A'),
+ FOURCC_ABGR = FOURCC('A', 'B', 'G', 'R'),
+ FOURCC_24BG = FOURCC('2', '4', 'B', 'G'),
+ FOURCC_RAW = FOURCC('r', 'a', 'w', ' '),
+ FOURCC_RGBA = FOURCC('R', 'G', 'B', 'A'),
+ FOURCC_RGBP = FOURCC('R', 'G', 'B', 'P'), // rgb565 LE.
+ FOURCC_RGBO = FOURCC('R', 'G', 'B', 'O'), // argb1555 LE.
+ FOURCC_R444 = FOURCC('R', '4', '4', '4'), // argb4444 LE.
+
+  // 4 Secondary RGB formats: 4 Bayer patterns. Deprecated.
+ FOURCC_RGGB = FOURCC('R', 'G', 'G', 'B'),
+ FOURCC_BGGR = FOURCC('B', 'G', 'G', 'R'),
+ FOURCC_GRBG = FOURCC('G', 'R', 'B', 'G'),
+ FOURCC_GBRG = FOURCC('G', 'B', 'R', 'G'),
+
+ // 1 Primary Compressed YUV format.
+ FOURCC_MJPG = FOURCC('M', 'J', 'P', 'G'),
+
+  // 5 Auxiliary YUV variations: 3 with U and V planes swapped, 1 alias.
+ FOURCC_YV12 = FOURCC('Y', 'V', '1', '2'),
+ FOURCC_YV16 = FOURCC('Y', 'V', '1', '6'),
+ FOURCC_YV24 = FOURCC('Y', 'V', '2', '4'),
+ FOURCC_YU12 = FOURCC('Y', 'U', '1', '2'), // Linux version of I420.
+ FOURCC_J420 = FOURCC('J', '4', '2', '0'),
+ FOURCC_J400 = FOURCC('J', '4', '0', '0'),
+
+ // 14 Auxiliary aliases. CanonicalFourCC() maps these to canonical fourcc.
+ FOURCC_IYUV = FOURCC('I', 'Y', 'U', 'V'), // Alias for I420.
+ FOURCC_YU16 = FOURCC('Y', 'U', '1', '6'), // Alias for I422.
+ FOURCC_YU24 = FOURCC('Y', 'U', '2', '4'), // Alias for I444.
+ FOURCC_YUYV = FOURCC('Y', 'U', 'Y', 'V'), // Alias for YUY2.
+ FOURCC_YUVS = FOURCC('y', 'u', 'v', 's'), // Alias for YUY2 on Mac.
+ FOURCC_HDYC = FOURCC('H', 'D', 'Y', 'C'), // Alias for UYVY.
+ FOURCC_2VUY = FOURCC('2', 'v', 'u', 'y'), // Alias for UYVY on Mac.
+ FOURCC_JPEG = FOURCC('J', 'P', 'E', 'G'), // Alias for MJPG.
+ FOURCC_DMB1 = FOURCC('d', 'm', 'b', '1'), // Alias for MJPG on Mac.
+ FOURCC_BA81 = FOURCC('B', 'A', '8', '1'), // Alias for BGGR.
+ FOURCC_RGB3 = FOURCC('R', 'G', 'B', '3'), // Alias for RAW.
+ FOURCC_BGR3 = FOURCC('B', 'G', 'R', '3'), // Alias for 24BG.
+ FOURCC_CM32 = FOURCC(0, 0, 0, 32), // Alias for BGRA kCMPixelFormat_32ARGB
+ FOURCC_CM24 = FOURCC(0, 0, 0, 24), // Alias for RAW kCMPixelFormat_24RGB
+ FOURCC_L555 = FOURCC('L', '5', '5', '5'), // Alias for RGBO.
+ FOURCC_L565 = FOURCC('L', '5', '6', '5'), // Alias for RGBP.
+ FOURCC_5551 = FOURCC('5', '5', '5', '1'), // Alias for RGBO.
+
+ // 1 Auxiliary compressed YUV format set aside for capturer.
+ FOURCC_H264 = FOURCC('H', '2', '6', '4'),
+
+ // Match any fourcc.
+ FOURCC_ANY = -1,
+};
+
+enum FourCCBpp {
+  // Bits per pixel for the canonical fourcc codes used in our code.
+ FOURCC_BPP_I420 = 12,
+ FOURCC_BPP_I422 = 16,
+ FOURCC_BPP_I444 = 24,
+ FOURCC_BPP_I411 = 12,
+ FOURCC_BPP_I400 = 8,
+ FOURCC_BPP_NV21 = 12,
+ FOURCC_BPP_NV12 = 12,
+ FOURCC_BPP_YUY2 = 16,
+ FOURCC_BPP_UYVY = 16,
+ FOURCC_BPP_M420 = 12,
+ FOURCC_BPP_Q420 = 12,
+ FOURCC_BPP_ARGB = 32,
+ FOURCC_BPP_BGRA = 32,
+ FOURCC_BPP_ABGR = 32,
+ FOURCC_BPP_RGBA = 32,
+ FOURCC_BPP_24BG = 24,
+ FOURCC_BPP_RAW = 24,
+ FOURCC_BPP_RGBP = 16,
+ FOURCC_BPP_RGBO = 16,
+ FOURCC_BPP_R444 = 16,
+ FOURCC_BPP_RGGB = 8,
+ FOURCC_BPP_BGGR = 8,
+ FOURCC_BPP_GRBG = 8,
+ FOURCC_BPP_GBRG = 8,
+ FOURCC_BPP_YV12 = 12,
+ FOURCC_BPP_YV16 = 16,
+ FOURCC_BPP_YV24 = 24,
+ FOURCC_BPP_YU12 = 12,
+ FOURCC_BPP_J420 = 12,
+ FOURCC_BPP_J400 = 8,
+ FOURCC_BPP_MJPG = 0, // 0 means unknown.
+ FOURCC_BPP_H264 = 0,
+ FOURCC_BPP_IYUV = 12,
+ FOURCC_BPP_YU16 = 16,
+ FOURCC_BPP_YU24 = 24,
+ FOURCC_BPP_YUYV = 16,
+ FOURCC_BPP_YUVS = 16,
+ FOURCC_BPP_HDYC = 16,
+ FOURCC_BPP_2VUY = 16,
+ FOURCC_BPP_JPEG = 1,
+ FOURCC_BPP_DMB1 = 1,
+ FOURCC_BPP_BA81 = 8,
+ FOURCC_BPP_RGB3 = 24,
+ FOURCC_BPP_BGR3 = 24,
+ FOURCC_BPP_CM32 = 32,
+ FOURCC_BPP_CM24 = 24,
+
+ // Match any fourcc.
+ FOURCC_BPP_ANY = 0, // 0 means unknown.
+};
+
+// Converts fourcc aliases into canonical ones.
+LIBYUV_API uint32 CanonicalFourCC(uint32 fourcc);
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+#endif // INCLUDE_LIBYUV_VIDEO_COMMON_H_ NOLINT
diff --git a/media/libaom/src/third_party/libyuv/source/compare.cc b/media/libaom/src/third_party/libyuv/source/compare.cc
new file mode 100644
index 000000000..46aa8473d
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/compare.cc
@@ -0,0 +1,373 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/compare.h"
+
+#include <float.h>
+#include <math.h>
+#ifdef _OPENMP
+#include <omp.h>
+#endif
+
+#include "libyuv/basic_types.h"
+#include "libyuv/cpu_id.h"
+#include "libyuv/row.h"
+#include "libyuv/video_common.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// A hash seed of 5381 is recommended.
+// Internal C version of HashDjb2 with an int-sized count for efficiency.
+uint32 HashDjb2_C(const uint8* src, int count, uint32 seed);
+
+// This code path covers Visual C x86 and GCC x86/x86_64 (non-PIC on i386).
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(_M_IX86) || \
+ (defined(__x86_64__) || (defined(__i386__) && !defined(__pic__))))
+#define HAS_HASHDJB2_SSE41
+uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed);
+
+#ifdef VISUALC_HAS_AVX2
+#define HAS_HASHDJB2_AVX2
+uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed);
+#endif
+
+#endif // HAS_HASHDJB2_SSE41
+
+// A hash seed of 5381 is recommended.
+LIBYUV_API
+uint32 HashDjb2(const uint8* src, uint64 count, uint32 seed) {
+ const int kBlockSize = 1 << 15; // 32768;
+ int remainder;
+ uint32 (*HashDjb2_SSE)(const uint8* src, int count, uint32 seed) = HashDjb2_C;
+#if defined(HAS_HASHDJB2_SSE41)
+ if (TestCpuFlag(kCpuHasSSE41)) {
+ HashDjb2_SSE = HashDjb2_SSE41;
+ }
+#endif
+#if defined(HAS_HASHDJB2_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ HashDjb2_SSE = HashDjb2_AVX2;
+ }
+#endif
+
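+  // Process 32 KB blocks first, then the largest multiple of 16 bytes, and
+  // finally any tail of fewer than 16 bytes with the C version.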
+ while (count >= (uint64)(kBlockSize)) {
+ seed = HashDjb2_SSE(src, kBlockSize, seed);
+ src += kBlockSize;
+ count -= kBlockSize;
+ }
+ remainder = (int)(count) & ~15;
+ if (remainder) {
+ seed = HashDjb2_SSE(src, remainder, seed);
+ src += remainder;
+ count -= remainder;
+ }
+ remainder = (int)(count) & 15;
+ if (remainder) {
+ seed = HashDjb2_C(src, remainder, seed);
+ }
+ return seed;
+}
+
+static uint32 ARGBDetectRow_C(const uint8* argb, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ if (argb[0] != 255) { // First byte is not Alpha of 255, so not ARGB.
+ return FOURCC_BGRA;
+ }
+ if (argb[3] != 255) { // 4th byte is not Alpha of 255, so not BGRA.
+ return FOURCC_ARGB;
+ }
+ if (argb[4] != 255) { // Second pixel first byte is not Alpha of 255.
+ return FOURCC_BGRA;
+ }
+ if (argb[7] != 255) { // Second pixel 4th byte is not Alpha of 255.
+ return FOURCC_ARGB;
+ }
+ argb += 8;
+ }
+ if (width & 1) {
+ if (argb[0] != 255) { // First byte is not Alpha of 255, so not ARGB.
+ return FOURCC_BGRA;
+ }
+ if (argb[3] != 255) { // 4th byte is not Alpha of 255, so not BGRA.
+ return FOURCC_ARGB;
+ }
+ }
+ return 0;
+}
+
+// Scan an opaque argb image and return fourcc based on alpha offset.
+// Returns FOURCC_ARGB, FOURCC_BGRA, or 0 if unknown.
+LIBYUV_API
+uint32 ARGBDetect(const uint8* argb, int stride_argb, int width, int height) {
+ uint32 fourcc = 0;
+ int h;
+
+ // Coalesce rows.
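+  // (When stride equals width * 4, the rows are contiguous and the whole
+  // image can be scanned as one long row.)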
+ if (stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ stride_argb = 0;
+ }
+ for (h = 0; h < height && fourcc == 0; ++h) {
+ fourcc = ARGBDetectRow_C(argb, width);
+ argb += stride_argb;
+ }
+ return fourcc;
+}
+
+uint32 SumSquareError_C(const uint8* src_a, const uint8* src_b, int count);
+#if !defined(LIBYUV_DISABLE_NEON) && \
+ (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
+#define HAS_SUMSQUAREERROR_NEON
+uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b, int count);
+#endif
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(_M_IX86) || defined(__x86_64__) || defined(__i386__))
+#define HAS_SUMSQUAREERROR_SSE2
+uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count);
+#endif
+
+#ifdef VISUALC_HAS_AVX2
+#define HAS_SUMSQUAREERROR_AVX2
+uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count);
+#endif
+
+// TODO(fbarchard): Refactor into row function.
+LIBYUV_API
+uint64 ComputeSumSquareError(const uint8* src_a, const uint8* src_b,
+ int count) {
+  // SumSquareError returns values 0 to 65025 for each squared difference.
+ // Up to 65536 of those can be summed and remain within a uint32.
+ // After each block of 65536 pixels, accumulate into a uint64.
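+  // (65536 * 255^2 = 4,261,478,400, which still fits in a uint32.)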
+ const int kBlockSize = 65536;
+ int remainder = count & (kBlockSize - 1) & ~31;
+ uint64 sse = 0;
+ int i;
+ uint32 (*SumSquareError)(const uint8* src_a, const uint8* src_b, int count) =
+ SumSquareError_C;
+#if defined(HAS_SUMSQUAREERROR_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SumSquareError = SumSquareError_NEON;
+ }
+#endif
+#if defined(HAS_SUMSQUAREERROR_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+    // Note: only used for multiples of 16, so count is not checked.
+ SumSquareError = SumSquareError_SSE2;
+ }
+#endif
+#if defined(HAS_SUMSQUAREERROR_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+    // Note: only used for multiples of 32, so count is not checked.
+ SumSquareError = SumSquareError_AVX2;
+ }
+#endif
+#ifdef _OPENMP
+#pragma omp parallel for reduction(+: sse)
+#endif
+ for (i = 0; i < (count - (kBlockSize - 1)); i += kBlockSize) {
+ sse += SumSquareError(src_a + i, src_b + i, kBlockSize);
+ }
+ src_a += count & ~(kBlockSize - 1);
+ src_b += count & ~(kBlockSize - 1);
+ if (remainder) {
+ sse += SumSquareError(src_a, src_b, remainder);
+ src_a += remainder;
+ src_b += remainder;
+ }
+ remainder = count & 31;
+ if (remainder) {
+ sse += SumSquareError_C(src_a, src_b, remainder);
+ }
+ return sse;
+}
+
+LIBYUV_API
+uint64 ComputeSumSquareErrorPlane(const uint8* src_a, int stride_a,
+ const uint8* src_b, int stride_b,
+ int width, int height) {
+ uint64 sse = 0;
+ int h;
+ // Coalesce rows.
+ if (stride_a == width &&
+ stride_b == width) {
+ width *= height;
+ height = 1;
+ stride_a = stride_b = 0;
+ }
+ for (h = 0; h < height; ++h) {
+ sse += ComputeSumSquareError(src_a, src_b, width);
+ src_a += stride_a;
+ src_b += stride_b;
+ }
+ return sse;
+}
+
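+// PSNR = 10 * log10(255^2 / MSE) with MSE = sse / count; the code below folds
+// the division into the log argument as 255^2 * count / sse.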
+LIBYUV_API
+double SumSquareErrorToPsnr(uint64 sse, uint64 count) {
+ double psnr;
+ if (sse > 0) {
+ double mse = (double)(count) / (double)(sse);
+ psnr = 10.0 * log10(255.0 * 255.0 * mse);
+ } else {
+ psnr = kMaxPsnr; // Limit to prevent divide by 0
+ }
+
+ if (psnr > kMaxPsnr)
+ psnr = kMaxPsnr;
+
+ return psnr;
+}
+
+LIBYUV_API
+double CalcFramePsnr(const uint8* src_a, int stride_a,
+ const uint8* src_b, int stride_b,
+ int width, int height) {
+ const uint64 samples = width * height;
+ const uint64 sse = ComputeSumSquareErrorPlane(src_a, stride_a,
+ src_b, stride_b,
+ width, height);
+ return SumSquareErrorToPsnr(sse, samples);
+}
+
+LIBYUV_API
+double I420Psnr(const uint8* src_y_a, int stride_y_a,
+ const uint8* src_u_a, int stride_u_a,
+ const uint8* src_v_a, int stride_v_a,
+ const uint8* src_y_b, int stride_y_b,
+ const uint8* src_u_b, int stride_u_b,
+ const uint8* src_v_b, int stride_v_b,
+ int width, int height) {
+ const uint64 sse_y = ComputeSumSquareErrorPlane(src_y_a, stride_y_a,
+ src_y_b, stride_y_b,
+ width, height);
+ const int width_uv = (width + 1) >> 1;
+ const int height_uv = (height + 1) >> 1;
+ const uint64 sse_u = ComputeSumSquareErrorPlane(src_u_a, stride_u_a,
+ src_u_b, stride_u_b,
+ width_uv, height_uv);
+ const uint64 sse_v = ComputeSumSquareErrorPlane(src_v_a, stride_v_a,
+ src_v_b, stride_v_b,
+ width_uv, height_uv);
+ const uint64 samples = width * height + 2 * (width_uv * height_uv);
+ const uint64 sse = sse_y + sse_u + sse_v;
+ return SumSquareErrorToPsnr(sse, samples);
+}
+
+static const int64 cc1 = 26634;  // 64^2 * (.01*255)^2
+static const int64 cc2 = 239708;  // 64^2 * (.03*255)^2
+
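+// Integer form of the standard SSIM score for one 8x8 window:
+//   SSIM = (2*mu_a*mu_b + C1) * (2*cov_ab + C2) /
+//          ((mu_a^2 + mu_b^2 + C1) * (var_a + var_b + C2))
+// evaluated on count-scaled sums so all terms stay in int64 arithmetic.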
+static double Ssim8x8_C(const uint8* src_a, int stride_a,
+ const uint8* src_b, int stride_b) {
+ int64 sum_a = 0;
+ int64 sum_b = 0;
+ int64 sum_sq_a = 0;
+ int64 sum_sq_b = 0;
+ int64 sum_axb = 0;
+
+ int i;
+ for (i = 0; i < 8; ++i) {
+ int j;
+ for (j = 0; j < 8; ++j) {
+ sum_a += src_a[j];
+ sum_b += src_b[j];
+ sum_sq_a += src_a[j] * src_a[j];
+ sum_sq_b += src_b[j] * src_b[j];
+ sum_axb += src_a[j] * src_b[j];
+ }
+
+ src_a += stride_a;
+ src_b += stride_b;
+ }
+
+ {
+ const int64 count = 64;
+    // Scale the constants by the number of pixels.
+ const int64 c1 = (cc1 * count * count) >> 12;
+ const int64 c2 = (cc2 * count * count) >> 12;
+
+ const int64 sum_a_x_sum_b = sum_a * sum_b;
+
+ const int64 ssim_n = (2 * sum_a_x_sum_b + c1) *
+ (2 * count * sum_axb - 2 * sum_a_x_sum_b + c2);
+
+ const int64 sum_a_sq = sum_a*sum_a;
+ const int64 sum_b_sq = sum_b*sum_b;
+
+ const int64 ssim_d = (sum_a_sq + sum_b_sq + c1) *
+ (count * sum_sq_a - sum_a_sq +
+ count * sum_sq_b - sum_b_sq + c2);
+
+ if (ssim_d == 0.0) {
+ return DBL_MAX;
+ }
+ return ssim_n * 1.0 / ssim_d;
+ }
+}
+
+// We use an 8x8 moving window whose starting locations step along a 4x4
+// pixel grid. This arrangement lets the windows overlap block boundaries
+// to penalize blocking artifacts.
+LIBYUV_API
+double CalcFrameSsim(const uint8* src_a, int stride_a,
+ const uint8* src_b, int stride_b,
+ int width, int height) {
+ int samples = 0;
+ double ssim_total = 0;
+ double (*Ssim8x8)(const uint8* src_a, int stride_a,
+ const uint8* src_b, int stride_b) = Ssim8x8_C;
+
+  // Sample points start at each 4x4 grid location.
+ int i;
+ for (i = 0; i < height - 8; i += 4) {
+ int j;
+ for (j = 0; j < width - 8; j += 4) {
+ ssim_total += Ssim8x8(src_a + j, stride_a, src_b + j, stride_b);
+ samples++;
+ }
+
+ src_a += stride_a * 4;
+ src_b += stride_b * 4;
+ }
+
+ ssim_total /= samples;
+ return ssim_total;
+}
+
+LIBYUV_API
+double I420Ssim(const uint8* src_y_a, int stride_y_a,
+ const uint8* src_u_a, int stride_u_a,
+ const uint8* src_v_a, int stride_v_a,
+ const uint8* src_y_b, int stride_y_b,
+ const uint8* src_u_b, int stride_u_b,
+ const uint8* src_v_b, int stride_v_b,
+ int width, int height) {
+ const double ssim_y = CalcFrameSsim(src_y_a, stride_y_a,
+ src_y_b, stride_y_b, width, height);
+ const int width_uv = (width + 1) >> 1;
+ const int height_uv = (height + 1) >> 1;
+ const double ssim_u = CalcFrameSsim(src_u_a, stride_u_a,
+ src_u_b, stride_u_b,
+ width_uv, height_uv);
+ const double ssim_v = CalcFrameSsim(src_v_a, stride_v_a,
+ src_v_b, stride_v_b,
+ width_uv, height_uv);
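+  // Weighted average: luma counts for 0.8, each chroma plane for 0.1.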
+ return ssim_y * 0.8 + 0.1 * (ssim_u + ssim_v);
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/compare_common.cc b/media/libaom/src/third_party/libyuv/source/compare_common.cc
new file mode 100644
index 000000000..c546b5182
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/compare_common.cc
@@ -0,0 +1,42 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+uint32 SumSquareError_C(const uint8* src_a, const uint8* src_b, int count) {
+ uint32 sse = 0u;
+ int i;
+ for (i = 0; i < count; ++i) {
+ int diff = src_a[i] - src_b[i];
+ sse += (uint32)(diff * diff);
+ }
+ return sse;
+}
+
+// A hash seed of 5381 is recommended.
+// Internal C version of HashDjb2 with an int-sized count for efficiency.
+uint32 HashDjb2_C(const uint8* src, int count, uint32 seed) {
+ uint32 hash = seed;
+ int i;
+ for (i = 0; i < count; ++i) {
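+    // hash = hash * 33 + src[i]: the classic djb2 recurrence.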
+ hash += (hash << 5) + src[i];
+ }
+ return hash;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/compare_gcc.cc b/media/libaom/src/third_party/libyuv/source/compare_gcc.cc
new file mode 100644
index 000000000..247cb33bb
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/compare_gcc.cc
@@ -0,0 +1,152 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/basic_types.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))
+
+uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count) {
+ uint32 sse;
+ asm volatile ( // NOLINT
+ "pxor %%xmm0,%%xmm0 \n"
+ "pxor %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x10, 0) ",%0 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm2 \n"
+ "lea " MEMLEA(0x10, 1) ",%1 \n"
+ "movdqa %%xmm1,%%xmm3 \n"
+ "psubusb %%xmm2,%%xmm1 \n"
+ "psubusb %%xmm3,%%xmm2 \n"
+ "por %%xmm2,%%xmm1 \n"
+ "movdqa %%xmm1,%%xmm2 \n"
+ "punpcklbw %%xmm5,%%xmm1 \n"
+ "punpckhbw %%xmm5,%%xmm2 \n"
+ "pmaddwd %%xmm1,%%xmm1 \n"
+ "pmaddwd %%xmm2,%%xmm2 \n"
+ "paddd %%xmm1,%%xmm0 \n"
+ "paddd %%xmm2,%%xmm0 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+
+ "pshufd $0xee,%%xmm0,%%xmm1 \n"
+ "paddd %%xmm1,%%xmm0 \n"
+ "pshufd $0x1,%%xmm0,%%xmm1 \n"
+ "paddd %%xmm1,%%xmm0 \n"
+ "movd %%xmm0,%3 \n"
+
+ : "+r"(src_a), // %0
+ "+r"(src_b), // %1
+ "+r"(count), // %2
+ "=g"(sse) // %3
+ :: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ ); // NOLINT
+ return sse;
+}
+
+#endif // defined(__x86_64__) || defined(__i386__)
+
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(__x86_64__) || (defined(__i386__) && !defined(__pic__)))
+#define HAS_HASHDJB2_SSE41
+static uvec32 kHash16x33 = { 0x92d9e201, 0, 0, 0 }; // 33 ^ 16
+static uvec32 kHashMul0 = {
+ 0x0c3525e1, // 33 ^ 15
+ 0xa3476dc1, // 33 ^ 14
+ 0x3b4039a1, // 33 ^ 13
+ 0x4f5f0981, // 33 ^ 12
+};
+static uvec32 kHashMul1 = {
+ 0x30f35d61, // 33 ^ 11
+ 0x855cb541, // 33 ^ 10
+ 0x040a9121, // 33 ^ 9
+ 0x747c7101, // 33 ^ 8
+};
+static uvec32 kHashMul2 = {
+ 0xec41d4e1, // 33 ^ 7
+ 0x4cfa3cc1, // 33 ^ 6
+ 0x025528a1, // 33 ^ 5
+ 0x00121881, // 33 ^ 4
+};
+static uvec32 kHashMul3 = {
+ 0x00008c61, // 33 ^ 3
+ 0x00000441, // 33 ^ 2
+ 0x00000021, // 33 ^ 1
+ 0x00000001, // 33 ^ 0
+};
+
+uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) {
+ uint32 hash;
+ asm volatile ( // NOLINT
+ "movd %2,%%xmm0 \n"
+ "pxor %%xmm7,%%xmm7 \n"
+ "movdqa %4,%%xmm6 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x10, 0) ",%0 \n"
+ "pmulld %%xmm6,%%xmm0 \n"
+ "movdqa %5,%%xmm5 \n"
+ "movdqa %%xmm1,%%xmm2 \n"
+ "punpcklbw %%xmm7,%%xmm2 \n"
+ "movdqa %%xmm2,%%xmm3 \n"
+ "punpcklwd %%xmm7,%%xmm3 \n"
+ "pmulld %%xmm5,%%xmm3 \n"
+ "movdqa %6,%%xmm5 \n"
+ "movdqa %%xmm2,%%xmm4 \n"
+ "punpckhwd %%xmm7,%%xmm4 \n"
+ "pmulld %%xmm5,%%xmm4 \n"
+ "movdqa %7,%%xmm5 \n"
+ "punpckhbw %%xmm7,%%xmm1 \n"
+ "movdqa %%xmm1,%%xmm2 \n"
+ "punpcklwd %%xmm7,%%xmm2 \n"
+ "pmulld %%xmm5,%%xmm2 \n"
+ "movdqa %8,%%xmm5 \n"
+ "punpckhwd %%xmm7,%%xmm1 \n"
+ "pmulld %%xmm5,%%xmm1 \n"
+ "paddd %%xmm4,%%xmm3 \n"
+ "paddd %%xmm2,%%xmm1 \n"
+ "paddd %%xmm3,%%xmm1 \n"
+ "pshufd $0xe,%%xmm1,%%xmm2 \n"
+ "paddd %%xmm2,%%xmm1 \n"
+ "pshufd $0x1,%%xmm1,%%xmm2 \n"
+ "paddd %%xmm2,%%xmm1 \n"
+ "paddd %%xmm1,%%xmm0 \n"
+ "sub $0x10,%1 \n"
+ "jg 1b \n"
+ "movd %%xmm0,%3 \n"
+ : "+r"(src), // %0
+ "+r"(count), // %1
+ "+rm"(seed), // %2
+ "=g"(hash) // %3
+ : "m"(kHash16x33), // %4
+ "m"(kHashMul0), // %5
+ "m"(kHashMul1), // %6
+ "m"(kHashMul2), // %7
+ "m"(kHashMul3) // %8
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ ); // NOLINT
+ return hash;
+}
+#endif // defined(__x86_64__) || (defined(__i386__) && !defined(__pic__))
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
diff --git a/media/libaom/src/third_party/libyuv/source/compare_neon.cc b/media/libaom/src/third_party/libyuv/source/compare_neon.cc
new file mode 100644
index 000000000..ef006ec41
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/compare_neon.cc
@@ -0,0 +1,65 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/basic_types.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
+ !defined(__aarch64__)
+
+uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b, int count) {
+ volatile uint32 sse;
+ asm volatile (
+ "vmov.u8 q8, #0 \n"
+ "vmov.u8 q10, #0 \n"
+ "vmov.u8 q9, #0 \n"
+ "vmov.u8 q11, #0 \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n"
+ MEMACCESS(1)
+ "vld1.8 {q1}, [%1]! \n"
+ "subs %2, %2, #16 \n"
+ "vsubl.u8 q2, d0, d2 \n"
+ "vsubl.u8 q3, d1, d3 \n"
+ "vmlal.s16 q8, d4, d4 \n"
+ "vmlal.s16 q9, d6, d6 \n"
+ "vmlal.s16 q10, d5, d5 \n"
+ "vmlal.s16 q11, d7, d7 \n"
+ "bgt 1b \n"
+
+ "vadd.u32 q8, q8, q9 \n"
+ "vadd.u32 q10, q10, q11 \n"
+ "vadd.u32 q11, q8, q10 \n"
+ "vpaddl.u32 q1, q11 \n"
+ "vadd.u64 d0, d2, d3 \n"
+ "vmov.32 %3, d0[0] \n"
+ : "+r"(src_a),
+ "+r"(src_b),
+ "+r"(count),
+ "=r"(sse)
+ :
+ : "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11");
+ return sse;
+}
+
+#endif // defined(__ARM_NEON__) && !defined(__aarch64__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/compare_neon64.cc b/media/libaom/src/third_party/libyuv/source/compare_neon64.cc
new file mode 100644
index 000000000..6d1e5e1bc
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/compare_neon64.cc
@@ -0,0 +1,63 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/basic_types.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
+
+uint32 SumSquareError_NEON(const uint8* src_a, const uint8* src_b, int count) {
+ volatile uint32 sse;
+ asm volatile (
+ "eor v16.16b, v16.16b, v16.16b \n"
+ "eor v18.16b, v18.16b, v18.16b \n"
+ "eor v17.16b, v17.16b, v17.16b \n"
+ "eor v19.16b, v19.16b, v19.16b \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n"
+ MEMACCESS(1)
+ "ld1 {v1.16b}, [%1], #16 \n"
+ "subs %w2, %w2, #16 \n"
+ "usubl v2.8h, v0.8b, v1.8b \n"
+ "usubl2 v3.8h, v0.16b, v1.16b \n"
+ "smlal v16.4s, v2.4h, v2.4h \n"
+ "smlal v17.4s, v3.4h, v3.4h \n"
+ "smlal2 v18.4s, v2.8h, v2.8h \n"
+ "smlal2 v19.4s, v3.8h, v3.8h \n"
+ "b.gt 1b \n"
+
+ "add v16.4s, v16.4s, v17.4s \n"
+ "add v18.4s, v18.4s, v19.4s \n"
+ "add v19.4s, v16.4s, v18.4s \n"
+ "addv s0, v19.4s \n"
+ "fmov %w3, s0 \n"
+ : "+r"(src_a),
+ "+r"(src_b),
+ "+r"(count),
+ "=r"(sse)
+ :
+ : "cc", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19");
+ return sse;
+}
+
+#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/compare_win.cc b/media/libaom/src/third_party/libyuv/source/compare_win.cc
new file mode 100644
index 000000000..19806f275
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/compare_win.cc
@@ -0,0 +1,229 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/basic_types.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for Visual C x86.
+#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \
+ defined(_MSC_VER) && !defined(__clang__)
+
+__declspec(naked)
+uint32 SumSquareError_SSE2(const uint8* src_a, const uint8* src_b, int count) {
+ __asm {
+ mov eax, [esp + 4] // src_a
+ mov edx, [esp + 8] // src_b
+ mov ecx, [esp + 12] // count
+ pxor xmm0, xmm0
+ pxor xmm5, xmm5
+
+ wloop:
+ movdqu xmm1, [eax]
+ lea eax, [eax + 16]
+ movdqu xmm2, [edx]
+ lea edx, [edx + 16]
+ movdqa xmm3, xmm1 // abs trick
+ psubusb xmm1, xmm2
+ psubusb xmm2, xmm3
+ por xmm1, xmm2
+ movdqa xmm2, xmm1
+ punpcklbw xmm1, xmm5
+ punpckhbw xmm2, xmm5
+ pmaddwd xmm1, xmm1
+ pmaddwd xmm2, xmm2
+ paddd xmm0, xmm1
+ paddd xmm0, xmm2
+ sub ecx, 16
+ jg wloop
+
+ pshufd xmm1, xmm0, 0xee
+ paddd xmm0, xmm1
+ pshufd xmm1, xmm0, 0x01
+ paddd xmm0, xmm1
+ movd eax, xmm0
+ ret
+ }
+}
+
+// Visual C 2012 required for AVX2.
+#if _MSC_VER >= 1700
+// C4752: found Intel(R) Advanced Vector Extensions; consider using /arch:AVX.
+#pragma warning(disable: 4752)
+__declspec(naked)
+uint32 SumSquareError_AVX2(const uint8* src_a, const uint8* src_b, int count) {
+ __asm {
+ mov eax, [esp + 4] // src_a
+ mov edx, [esp + 8] // src_b
+ mov ecx, [esp + 12] // count
+ vpxor ymm0, ymm0, ymm0 // sum
+ vpxor ymm5, ymm5, ymm5 // constant 0 for unpck
+ sub edx, eax
+
+ wloop:
+ vmovdqu ymm1, [eax]
+ vmovdqu ymm2, [eax + edx]
+ lea eax, [eax + 32]
+ vpsubusb ymm3, ymm1, ymm2 // abs difference trick
+ vpsubusb ymm2, ymm2, ymm1
+ vpor ymm1, ymm2, ymm3
+ vpunpcklbw ymm2, ymm1, ymm5 // u16. mutates order.
+ vpunpckhbw ymm1, ymm1, ymm5
+ vpmaddwd ymm2, ymm2, ymm2 // square + hadd to u32.
+ vpmaddwd ymm1, ymm1, ymm1
+ vpaddd ymm0, ymm0, ymm1
+ vpaddd ymm0, ymm0, ymm2
+ sub ecx, 32
+ jg wloop
+
+ vpshufd ymm1, ymm0, 0xee // 3, 2 + 1, 0 both lanes.
+ vpaddd ymm0, ymm0, ymm1
+ vpshufd ymm1, ymm0, 0x01 // 1 + 0 both lanes.
+ vpaddd ymm0, ymm0, ymm1
+ vpermq ymm1, ymm0, 0x02 // high + low lane.
+ vpaddd ymm0, ymm0, ymm1
+ vmovd eax, xmm0
+ vzeroupper
+ ret
+ }
+}
+#endif // _MSC_VER >= 1700
+
+#define HAS_HASHDJB2_SSE41
+static uvec32 kHash16x33 = { 0x92d9e201, 0, 0, 0 }; // 33 ^ 16
+static uvec32 kHashMul0 = {
+ 0x0c3525e1, // 33 ^ 15
+ 0xa3476dc1, // 33 ^ 14
+ 0x3b4039a1, // 33 ^ 13
+ 0x4f5f0981, // 33 ^ 12
+};
+static uvec32 kHashMul1 = {
+ 0x30f35d61, // 33 ^ 11
+ 0x855cb541, // 33 ^ 10
+ 0x040a9121, // 33 ^ 9
+ 0x747c7101, // 33 ^ 8
+};
+static uvec32 kHashMul2 = {
+ 0xec41d4e1, // 33 ^ 7
+ 0x4cfa3cc1, // 33 ^ 6
+ 0x025528a1, // 33 ^ 5
+ 0x00121881, // 33 ^ 4
+};
+static uvec32 kHashMul3 = {
+ 0x00008c61, // 33 ^ 3
+ 0x00000441, // 33 ^ 2
+ 0x00000021, // 33 ^ 1
+ 0x00000001, // 33 ^ 0
+};
+
+// 27: 66 0F 38 40 C6 pmulld xmm0,xmm6
+// 44: 66 0F 38 40 DD pmulld xmm3,xmm5
+// 59: 66 0F 38 40 E5 pmulld xmm4,xmm5
+// 72: 66 0F 38 40 D5 pmulld xmm2,xmm5
+// 83: 66 0F 38 40 CD pmulld xmm1,xmm5
+#define pmulld(reg) _asm _emit 0x66 _asm _emit 0x0F _asm _emit 0x38 \
+ _asm _emit 0x40 _asm _emit reg
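+// pmulld is emitted as raw opcode bytes because the Visual C inline assembler
+// does not accept the SSE4.1 mnemonic directly.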
+
+__declspec(naked)
+uint32 HashDjb2_SSE41(const uint8* src, int count, uint32 seed) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov ecx, [esp + 8] // count
+ movd xmm0, [esp + 12] // seed
+
+ pxor xmm7, xmm7 // constant 0 for unpck
+ movdqa xmm6, kHash16x33
+
+ wloop:
+ movdqu xmm1, [eax] // src[0-15]
+ lea eax, [eax + 16]
+ pmulld(0xc6) // pmulld xmm0,xmm6 hash *= 33 ^ 16
+ movdqa xmm5, kHashMul0
+ movdqa xmm2, xmm1
+ punpcklbw xmm2, xmm7 // src[0-7]
+ movdqa xmm3, xmm2
+ punpcklwd xmm3, xmm7 // src[0-3]
+ pmulld(0xdd) // pmulld xmm3, xmm5
+ movdqa xmm5, kHashMul1
+ movdqa xmm4, xmm2
+ punpckhwd xmm4, xmm7 // src[4-7]
+ pmulld(0xe5) // pmulld xmm4, xmm5
+ movdqa xmm5, kHashMul2
+ punpckhbw xmm1, xmm7 // src[8-15]
+ movdqa xmm2, xmm1
+ punpcklwd xmm2, xmm7 // src[8-11]
+ pmulld(0xd5) // pmulld xmm2, xmm5
+ movdqa xmm5, kHashMul3
+ punpckhwd xmm1, xmm7 // src[12-15]
+ pmulld(0xcd) // pmulld xmm1, xmm5
+ paddd xmm3, xmm4 // add 16 results
+ paddd xmm1, xmm2
+ paddd xmm1, xmm3
+
+ pshufd xmm2, xmm1, 0x0e // upper 2 dwords
+ paddd xmm1, xmm2
+ pshufd xmm2, xmm1, 0x01
+ paddd xmm1, xmm2
+ paddd xmm0, xmm1
+ sub ecx, 16
+ jg wloop
+
+ movd eax, xmm0 // return hash
+ ret
+ }
+}
+
+// Visual C 2012 required for AVX2.
+#if _MSC_VER >= 1700
+__declspec(naked)
+uint32 HashDjb2_AVX2(const uint8* src, int count, uint32 seed) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov ecx, [esp + 8] // count
+ movd xmm0, [esp + 12] // seed
+ movdqa xmm6, kHash16x33
+
+ wloop:
+ vpmovzxbd xmm3, dword ptr [eax] // src[0-3]
+ pmulld xmm0, xmm6 // hash *= 33 ^ 16
+ vpmovzxbd xmm4, dword ptr [eax + 4] // src[4-7]
+ pmulld xmm3, kHashMul0
+ vpmovzxbd xmm2, dword ptr [eax + 8] // src[8-11]
+ pmulld xmm4, kHashMul1
+ vpmovzxbd xmm1, dword ptr [eax + 12] // src[12-15]
+ pmulld xmm2, kHashMul2
+ lea eax, [eax + 16]
+ pmulld xmm1, kHashMul3
+ paddd xmm3, xmm4 // add 16 results
+ paddd xmm1, xmm2
+ paddd xmm1, xmm3
+ pshufd xmm2, xmm1, 0x0e // upper 2 dwords
+ paddd xmm1, xmm2
+ pshufd xmm2, xmm1, 0x01
+ paddd xmm1, xmm2
+ paddd xmm0, xmm1
+ sub ecx, 16
+ jg wloop
+
+ movd eax, xmm0 // return hash
+ ret
+ }
+}
+#endif // _MSC_VER >= 1700
+#endif // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/convert.cc b/media/libaom/src/third_party/libyuv/source/convert.cc
new file mode 100644
index 000000000..3ad6bd7a4
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/convert.cc
@@ -0,0 +1,1389 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/convert.h"
+
+#include "libyuv/basic_types.h"
+#include "libyuv/cpu_id.h"
+#include "libyuv/planar_functions.h"
+#include "libyuv/rotate.h"
+#include "libyuv/scale.h" // For ScalePlane()
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#define SUBSAMPLE(v, a, s) (v < 0) ? (-((-v + a) >> s)) : ((v + a) >> s)
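+// e.g. SUBSAMPLE(v, 1, 1) halves with round-half-up: 5 -> 3; negative values
+// round away from zero: -5 -> -3.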
+static __inline int Abs(int v) {
+ return v >= 0 ? v : -v;
+}
+
+// Any I4xx To I420 format with mirroring.
+static int I4xxToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int src_y_width, int src_y_height,
+ int src_uv_width, int src_uv_height) {
+ const int dst_y_width = Abs(src_y_width);
+ const int dst_y_height = Abs(src_y_height);
+ const int dst_uv_width = SUBSAMPLE(dst_y_width, 1, 1);
+ const int dst_uv_height = SUBSAMPLE(dst_y_height, 1, 1);
+ if (src_y_width == 0 || src_y_height == 0 ||
+ src_uv_width == 0 || src_uv_height == 0) {
+ return -1;
+ }
+ ScalePlane(src_y, src_stride_y, src_y_width, src_y_height,
+ dst_y, dst_stride_y, dst_y_width, dst_y_height,
+ kFilterBilinear);
+ ScalePlane(src_u, src_stride_u, src_uv_width, src_uv_height,
+ dst_u, dst_stride_u, dst_uv_width, dst_uv_height,
+ kFilterBilinear);
+ ScalePlane(src_v, src_stride_v, src_uv_width, src_uv_height,
+ dst_v, dst_stride_v, dst_uv_width, dst_uv_height,
+ kFilterBilinear);
+ return 0;
+}
+
+// Copy I420 with optional flipping
+// TODO(fbarchard): Use ScalePlane, which supports mirroring, but ensure
+// it does row coalescing.
+LIBYUV_API
+int I420Copy(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int halfwidth = (width + 1) >> 1;
+ int halfheight = (height + 1) >> 1;
+ if (!src_y || !src_u || !src_v ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ halfheight = (height + 1) >> 1;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_u = src_u + (halfheight - 1) * src_stride_u;
+ src_v = src_v + (halfheight - 1) * src_stride_v;
+ src_stride_y = -src_stride_y;
+ src_stride_u = -src_stride_u;
+ src_stride_v = -src_stride_v;
+ }
+
+ if (dst_y) {
+ CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ }
+ // Copy UV planes.
+ CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight);
+ CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight);
+ return 0;
+}
+
+// 422 chroma is 1/2 width, 1x height
+// 420 chroma is 1/2 width, 1/2 height
+LIBYUV_API
+int I422ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ const int src_uv_width = SUBSAMPLE(width, 1, 1);
+ return I4xxToI420(src_y, src_stride_y,
+ src_u, src_stride_u,
+ src_v, src_stride_v,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height,
+ src_uv_width, height);
+}
+
+// 444 chroma is 1x width, 1x height
+// 420 chroma is 1/2 width, 1/2 height
+LIBYUV_API
+int I444ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ return I4xxToI420(src_y, src_stride_y,
+ src_u, src_stride_u,
+ src_v, src_stride_v,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height,
+ width, height);
+}
+
+// 411 chroma is 1/4 width, 1x height
+// 420 chroma is 1/2 width, 1/2 height
+LIBYUV_API
+int I411ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
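+  // SUBSAMPLE(width, 3, 2) is (width + 3) >> 2: quarter width, rounded up.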
+ const int src_uv_width = SUBSAMPLE(width, 3, 2);
+ return I4xxToI420(src_y, src_stride_y,
+ src_u, src_stride_u,
+ src_v, src_stride_v,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height,
+ src_uv_width, height);
+}
+
+// I400 is greyscale, typically used in MJPG.
+LIBYUV_API
+int I400ToI420(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int halfwidth = (width + 1) >> 1;
+ int halfheight = (height + 1) >> 1;
+ if (!src_y || !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ halfheight = (height + 1) >> 1;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_stride_y = -src_stride_y;
+ }
+ CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ SetPlane(dst_u, dst_stride_u, halfwidth, halfheight, 128);
+ SetPlane(dst_v, dst_stride_v, halfwidth, halfheight, 128);
+ return 0;
+}
+
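+// Copy a plane whose rows alternate between two strides (as in M420's Y rows).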
+static void CopyPlane2(const uint8* src, int src_stride_0, int src_stride_1,
+ uint8* dst, int dst_stride,
+ int width, int height) {
+ int y;
+ void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
+#if defined(HAS_COPYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ CopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
+ }
+#endif
+#if defined(HAS_COPYROW_AVX)
+ if (TestCpuFlag(kCpuHasAVX)) {
+ CopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
+ }
+#endif
+#if defined(HAS_COPYROW_ERMS)
+ if (TestCpuFlag(kCpuHasERMS)) {
+ CopyRow = CopyRow_ERMS;
+ }
+#endif
+#if defined(HAS_COPYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ CopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
+ }
+#endif
+#if defined(HAS_COPYROW_MIPS)
+ if (TestCpuFlag(kCpuHasMIPS)) {
+ CopyRow = CopyRow_MIPS;
+ }
+#endif
+
+ // Copy plane
+ for (y = 0; y < height - 1; y += 2) {
+ CopyRow(src, dst, width);
+ CopyRow(src + src_stride_0, dst + dst_stride, width);
+ src += src_stride_0 + src_stride_1;
+ dst += dst_stride * 2;
+ }
+ if (height & 1) {
+ CopyRow(src, dst, width);
+ }
+}
+
+// Supports converting from FOURCC_M420.
+// Useful for bandwidth-constrained transports like USB 1.0 and 2.0, and for
+// easy conversion to I420.
+// M420 format description:
+// M420 is row biplanar 420: 2 rows of Y and 1 row of UV.
+// Chroma is half width / half height (420).
+// src_stride_m420 is row planar. Normally this will be the width in pixels.
+// The UV plane is half width but holds 2 values per pixel, so src_stride_m420
+// applies to it as well as to the two Y planes.
+static int X420ToI420(const uint8* src_y,
+ int src_stride_y0, int src_stride_y1,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ int halfwidth = (width + 1) >> 1;
+ int halfheight = (height + 1) >> 1;
+ void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) =
+ SplitUVRow_C;
+ if (!src_y || !src_uv ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ halfheight = (height + 1) >> 1;
+ dst_y = dst_y + (height - 1) * dst_stride_y;
+ dst_u = dst_u + (halfheight - 1) * dst_stride_u;
+ dst_v = dst_v + (halfheight - 1) * dst_stride_v;
+ dst_stride_y = -dst_stride_y;
+ dst_stride_u = -dst_stride_u;
+ dst_stride_v = -dst_stride_v;
+ }
+ // Coalesce rows.
+ if (src_stride_y0 == width &&
+ src_stride_y1 == width &&
+ dst_stride_y == width) {
+ width *= height;
+ height = 1;
+ src_stride_y0 = src_stride_y1 = dst_stride_y = 0;
+ }
+ // Coalesce rows.
+ if (src_stride_uv == halfwidth * 2 &&
+ dst_stride_u == halfwidth &&
+ dst_stride_v == halfwidth) {
+ halfwidth *= halfheight;
+ halfheight = 1;
+ src_stride_uv = dst_stride_u = dst_stride_v = 0;
+ }
+#if defined(HAS_SPLITUVROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ SplitUVRow = SplitUVRow_Any_SSE2;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ SplitUVRow = SplitUVRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SPLITUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ SplitUVRow = SplitUVRow_Any_AVX2;
+ if (IS_ALIGNED(halfwidth, 32)) {
+ SplitUVRow = SplitUVRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_SPLITUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SplitUVRow = SplitUVRow_Any_NEON;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ SplitUVRow = SplitUVRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_SPLITUVROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(src_uv, 4) && IS_ALIGNED(src_stride_uv, 4) &&
+ IS_ALIGNED(dst_u, 4) && IS_ALIGNED(dst_stride_u, 4) &&
+ IS_ALIGNED(dst_v, 4) && IS_ALIGNED(dst_stride_v, 4)) {
+ SplitUVRow = SplitUVRow_Any_MIPS_DSPR2;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ SplitUVRow = SplitUVRow_MIPS_DSPR2;
+ }
+ }
+#endif
+
+ if (dst_y) {
+ if (src_stride_y0 == src_stride_y1) {
+ CopyPlane(src_y, src_stride_y0, dst_y, dst_stride_y, width, height);
+ } else {
+ CopyPlane2(src_y, src_stride_y0, src_stride_y1, dst_y, dst_stride_y,
+ width, height);
+ }
+ }
+
+ for (y = 0; y < halfheight; ++y) {
+    // Split a row of interleaved UV into a U row and a V row.
+ SplitUVRow(src_uv, dst_u, dst_v, halfwidth);
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ src_uv += src_stride_uv;
+ }
+ return 0;
+}
+
+// Convert NV12 to I420.
+LIBYUV_API
+int NV12ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ return X420ToI420(src_y, src_stride_y, src_stride_y,
+ src_uv, src_stride_uv,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height);
+}
+
+// Convert NV21 to I420. Same as NV12 but u and v pointers swapped.
+LIBYUV_API
+int NV21ToI420(const uint8* src_y, int src_stride_y,
+ const uint8* src_vu, int src_stride_vu,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ return X420ToI420(src_y, src_stride_y, src_stride_y,
+ src_vu, src_stride_vu,
+ dst_y, dst_stride_y,
+ dst_v, dst_stride_v,
+ dst_u, dst_stride_u,
+ width, height);
+}
+
+// Convert M420 to I420.
+LIBYUV_API
+int M420ToI420(const uint8* src_m420, int src_stride_m420,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
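+  // Within a group the second Y row follows the first (y0 stride = stride),
+  // but the next group starts after the UV row, so the second Y stride is
+  // 2 * stride. UV rows begin 2 rows in and advance 3 * stride per row.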
+ return X420ToI420(src_m420, src_stride_m420, src_stride_m420 * 2,
+ src_m420 + src_stride_m420 * 2, src_stride_m420 * 3,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height);
+}
+
+// Convert YUY2 to I420.
+LIBYUV_API
+int YUY2ToI420(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*YUY2ToUVRow)(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) = YUY2ToUVRow_C;
+ void (*YUY2ToYRow)(const uint8* src_yuy2,
+ uint8* dst_y, int pix) = YUY2ToYRow_C;
+  if (!src_yuy2 || !dst_y || !dst_u || !dst_v ||
+      width <= 0 || height == 0) {
+    return -1;
+  }
+  // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_yuy2 = src_yuy2 + (height - 1) * src_stride_yuy2;
+ src_stride_yuy2 = -src_stride_yuy2;
+ }
+#if defined(HAS_YUY2TOYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ YUY2ToUVRow = YUY2ToUVRow_Any_SSE2;
+ YUY2ToYRow = YUY2ToYRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ YUY2ToUVRow = YUY2ToUVRow_SSE2;
+ YUY2ToYRow = YUY2ToYRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_YUY2TOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ YUY2ToUVRow = YUY2ToUVRow_Any_AVX2;
+ YUY2ToYRow = YUY2ToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ YUY2ToUVRow = YUY2ToUVRow_AVX2;
+ YUY2ToYRow = YUY2ToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_YUY2TOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ YUY2ToYRow = YUY2ToYRow_Any_NEON;
+ YUY2ToUVRow = YUY2ToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ YUY2ToYRow = YUY2ToYRow_NEON;
+ YUY2ToUVRow = YUY2ToUVRow_NEON;
+ }
+ }
+#endif
+
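+  // YUY2ToUVRow averages the chroma of two adjacent rows into one 420 chroma
+  // row; the odd trailing row below passes a stride of 0 so the last row is
+  // averaged with itself.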
+ for (y = 0; y < height - 1; y += 2) {
+ YUY2ToUVRow(src_yuy2, src_stride_yuy2, dst_u, dst_v, width);
+ YUY2ToYRow(src_yuy2, dst_y, width);
+ YUY2ToYRow(src_yuy2 + src_stride_yuy2, dst_y + dst_stride_y, width);
+ src_yuy2 += src_stride_yuy2 * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+ YUY2ToUVRow(src_yuy2, 0, dst_u, dst_v, width);
+ YUY2ToYRow(src_yuy2, dst_y, width);
+ }
+ return 0;
+}
+
+// Convert UYVY to I420.
+LIBYUV_API
+int UYVYToI420(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*UYVYToUVRow)(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) = UYVYToUVRow_C;
+ void (*UYVYToYRow)(const uint8* src_uyvy,
+ uint8* dst_y, int pix) = UYVYToYRow_C;
+  if (!src_uyvy || !dst_y || !dst_u || !dst_v ||
+      width <= 0 || height == 0) {
+    return -1;
+  }
+  // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_uyvy = src_uyvy + (height - 1) * src_stride_uyvy;
+ src_stride_uyvy = -src_stride_uyvy;
+ }
+#if defined(HAS_UYVYTOYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ UYVYToUVRow = UYVYToUVRow_Any_SSE2;
+ UYVYToYRow = UYVYToYRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ UYVYToUVRow = UYVYToUVRow_SSE2;
+ UYVYToYRow = UYVYToYRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_UYVYTOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ UYVYToUVRow = UYVYToUVRow_Any_AVX2;
+ UYVYToYRow = UYVYToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ UYVYToUVRow = UYVYToUVRow_AVX2;
+ UYVYToYRow = UYVYToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_UYVYTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ UYVYToYRow = UYVYToYRow_Any_NEON;
+ UYVYToUVRow = UYVYToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ UYVYToYRow = UYVYToYRow_NEON;
+ UYVYToUVRow = UYVYToUVRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+ UYVYToUVRow(src_uyvy, src_stride_uyvy, dst_u, dst_v, width);
+ UYVYToYRow(src_uyvy, dst_y, width);
+ UYVYToYRow(src_uyvy + src_stride_uyvy, dst_y + dst_stride_y, width);
+ src_uyvy += src_stride_uyvy * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+ UYVYToUVRow(src_uyvy, 0, dst_u, dst_v, width);
+ UYVYToYRow(src_uyvy, dst_y, width);
+ }
+ return 0;
+}
+
+// Convert ARGB to I420.
+LIBYUV_API
+int ARGBToI420(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ if (!src_argb ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVRow = ARGBToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+ ARGBToUVRow(src_argb, src_stride_argb, dst_u, dst_v, width);
+ ARGBToYRow(src_argb, dst_y, width);
+ ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
+ src_argb += src_stride_argb * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+ ARGBToUVRow(src_argb, 0, dst_u, dst_v, width);
+ ARGBToYRow(src_argb, dst_y, width);
+ }
+ return 0;
+}
+
+// Convert BGRA to I420.
+LIBYUV_API
+int BGRAToI420(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*BGRAToUVRow)(const uint8* src_bgra0, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int width) = BGRAToUVRow_C;
+ void (*BGRAToYRow)(const uint8* src_bgra, uint8* dst_y, int pix) =
+ BGRAToYRow_C;
+ if (!src_bgra ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_bgra = src_bgra + (height - 1) * src_stride_bgra;
+ src_stride_bgra = -src_stride_bgra;
+ }
+#if defined(HAS_BGRATOYROW_SSSE3) && defined(HAS_BGRATOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ BGRAToUVRow = BGRAToUVRow_Any_SSSE3;
+ BGRAToYRow = BGRAToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ BGRAToUVRow = BGRAToUVRow_SSSE3;
+ BGRAToYRow = BGRAToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_BGRATOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ BGRAToYRow = BGRAToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ BGRAToYRow = BGRAToYRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_BGRATOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ BGRAToUVRow = BGRAToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ BGRAToUVRow = BGRAToUVRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+ BGRAToUVRow(src_bgra, src_stride_bgra, dst_u, dst_v, width);
+ BGRAToYRow(src_bgra, dst_y, width);
+ BGRAToYRow(src_bgra + src_stride_bgra, dst_y + dst_stride_y, width);
+ src_bgra += src_stride_bgra * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+ BGRAToUVRow(src_bgra, 0, dst_u, dst_v, width);
+ BGRAToYRow(src_bgra, dst_y, width);
+ }
+ return 0;
+}
+
+// Convert ABGR to I420.
+LIBYUV_API
+int ABGRToI420(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*ABGRToUVRow)(const uint8* src_abgr0, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int width) = ABGRToUVRow_C;
+ void (*ABGRToYRow)(const uint8* src_abgr, uint8* dst_y, int pix) =
+ ABGRToYRow_C;
+ if (!src_abgr ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_abgr = src_abgr + (height - 1) * src_stride_abgr;
+ src_stride_abgr = -src_stride_abgr;
+ }
+#if defined(HAS_ABGRTOYROW_SSSE3) && defined(HAS_ABGRTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ABGRToUVRow = ABGRToUVRow_Any_SSSE3;
+ ABGRToYRow = ABGRToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ABGRToUVRow = ABGRToUVRow_SSSE3;
+ ABGRToYRow = ABGRToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ABGRTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ABGRToYRow = ABGRToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ABGRToYRow = ABGRToYRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ABGRTOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ABGRToUVRow = ABGRToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ABGRToUVRow = ABGRToUVRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+ ABGRToUVRow(src_abgr, src_stride_abgr, dst_u, dst_v, width);
+ ABGRToYRow(src_abgr, dst_y, width);
+ ABGRToYRow(src_abgr + src_stride_abgr, dst_y + dst_stride_y, width);
+ src_abgr += src_stride_abgr * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+ ABGRToUVRow(src_abgr, 0, dst_u, dst_v, width);
+ ABGRToYRow(src_abgr, dst_y, width);
+ }
+ return 0;
+}
+
+// Convert RGBA to I420.
+LIBYUV_API
+int RGBAToI420(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*RGBAToUVRow)(const uint8* src_rgba0, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int width) = RGBAToUVRow_C;
+ void (*RGBAToYRow)(const uint8* src_rgba, uint8* dst_y, int pix) =
+ RGBAToYRow_C;
+ if (!src_rgba ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_rgba = src_rgba + (height - 1) * src_stride_rgba;
+ src_stride_rgba = -src_stride_rgba;
+ }
+#if defined(HAS_RGBATOYROW_SSSE3) && defined(HAS_RGBATOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ RGBAToUVRow = RGBAToUVRow_Any_SSSE3;
+ RGBAToYRow = RGBAToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ RGBAToUVRow = RGBAToUVRow_SSSE3;
+ RGBAToYRow = RGBAToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_RGBATOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ RGBAToYRow = RGBAToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ RGBAToYRow = RGBAToYRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_RGBATOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ RGBAToUVRow = RGBAToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ RGBAToUVRow = RGBAToUVRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+ RGBAToUVRow(src_rgba, src_stride_rgba, dst_u, dst_v, width);
+ RGBAToYRow(src_rgba, dst_y, width);
+ RGBAToYRow(src_rgba + src_stride_rgba, dst_y + dst_stride_y, width);
+ src_rgba += src_stride_rgba * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+ RGBAToUVRow(src_rgba, 0, dst_u, dst_v, width);
+ RGBAToYRow(src_rgba, dst_y, width);
+ }
+ return 0;
+}
+
+// Convert RGB24 to I420.
+LIBYUV_API
+int RGB24ToI420(const uint8* src_rgb24, int src_stride_rgb24,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+#if defined(HAS_RGB24TOYROW_NEON)
+ void (*RGB24ToUVRow)(const uint8* src_rgb24, int src_stride_rgb24,
+ uint8* dst_u, uint8* dst_v, int width) = RGB24ToUVRow_C;
+ void (*RGB24ToYRow)(const uint8* src_rgb24, uint8* dst_y, int pix) =
+ RGB24ToYRow_C;
+#else
+ void (*RGB24ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) =
+ RGB24ToARGBRow_C;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+#endif
+ if (!src_rgb24 || !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_rgb24 = src_rgb24 + (height - 1) * src_stride_rgb24;
+ src_stride_rgb24 = -src_stride_rgb24;
+ }
+
+// Neon version does direct RGB24 to YUV.
+#if defined(HAS_RGB24TOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ RGB24ToUVRow = RGB24ToUVRow_Any_NEON;
+ RGB24ToYRow = RGB24ToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ RGB24ToYRow = RGB24ToYRow_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ RGB24ToUVRow = RGB24ToUVRow_NEON;
+ }
+ }
+ }
+// Other platforms do intermediate conversion from RGB24 to ARGB.
+#else
+#if defined(HAS_RGB24TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ RGB24ToARGBRow = RGB24ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ RGB24ToARGBRow = RGB24ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+ {
+ // Allocate 2 rows of ARGB.
+ const int kRowSize = (width * 4 + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
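+    // kRowSize rounds width * 4 up to a multiple of 32 bytes so each ARGB
+    // row is SIMD-aligned. The '{' above pairs with the '}' before the final
+    // #endif; both are compiled only on this non-NEON path.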
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+#if defined(HAS_RGB24TOYROW_NEON)
+ RGB24ToUVRow(src_rgb24, src_stride_rgb24, dst_u, dst_v, width);
+ RGB24ToYRow(src_rgb24, dst_y, width);
+ RGB24ToYRow(src_rgb24 + src_stride_rgb24, dst_y + dst_stride_y, width);
+#else
+ RGB24ToARGBRow(src_rgb24, row, width);
+ RGB24ToARGBRow(src_rgb24 + src_stride_rgb24, row + kRowSize, width);
+ ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+ ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
+#endif
+ src_rgb24 += src_stride_rgb24 * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+#if defined(HAS_RGB24TOYROW_NEON)
+ RGB24ToUVRow(src_rgb24, 0, dst_u, dst_v, width);
+ RGB24ToYRow(src_rgb24, dst_y, width);
+#else
+ RGB24ToARGBRow(src_rgb24, row, width);
+ ARGBToUVRow(row, 0, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+#endif
+ }
+#if !defined(HAS_RGB24TOYROW_NEON)
+ free_aligned_buffer_64(row);
+ }
+#endif
+ return 0;
+}
+
+// Convert RAW to I420.
+LIBYUV_API
+int RAWToI420(const uint8* src_raw, int src_stride_raw,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+#if defined(HAS_RAWTOYROW_NEON)
+ void (*RAWToUVRow)(const uint8* src_raw, int src_stride_raw,
+ uint8* dst_u, uint8* dst_v, int width) = RAWToUVRow_C;
+ void (*RAWToYRow)(const uint8* src_raw, uint8* dst_y, int pix) =
+ RAWToYRow_C;
+#else
+ void (*RAWToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) =
+ RAWToARGBRow_C;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+#endif
+ if (!src_raw || !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_raw = src_raw + (height - 1) * src_stride_raw;
+ src_stride_raw = -src_stride_raw;
+ }
+
+// Neon version does direct RAW to YUV.
+#if defined(HAS_RAWTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ RAWToUVRow = RAWToUVRow_Any_NEON;
+ RAWToYRow = RAWToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ RAWToYRow = RAWToYRow_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ RAWToUVRow = RAWToUVRow_NEON;
+ }
+ }
+ }
+// Other platforms do intermediate conversion from RAW to ARGB.
+#else
+#if defined(HAS_RAWTOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ RAWToARGBRow = RAWToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ RAWToARGBRow = RAWToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+ {
+ // Allocate 2 rows of ARGB.
+ const int kRowSize = (width * 4 + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+#if defined(HAS_RAWTOYROW_NEON)
+ RAWToUVRow(src_raw, src_stride_raw, dst_u, dst_v, width);
+ RAWToYRow(src_raw, dst_y, width);
+ RAWToYRow(src_raw + src_stride_raw, dst_y + dst_stride_y, width);
+#else
+ RAWToARGBRow(src_raw, row, width);
+ RAWToARGBRow(src_raw + src_stride_raw, row + kRowSize, width);
+ ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+ ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
+#endif
+ src_raw += src_stride_raw * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+#if defined(HAS_RAWTOYROW_NEON)
+ RAWToUVRow(src_raw, 0, dst_u, dst_v, width);
+ RAWToYRow(src_raw, dst_y, width);
+#else
+ RAWToARGBRow(src_raw, row, width);
+ ARGBToUVRow(row, 0, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+#endif
+ }
+#if !defined(HAS_RAWTOYROW_NEON)
+ free_aligned_buffer_64(row);
+ }
+#endif
+ return 0;
+}
+
+// Convert RGB565 to I420.
+LIBYUV_API
+int RGB565ToI420(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+#if defined(HAS_RGB565TOYROW_NEON)
+ void (*RGB565ToUVRow)(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_u, uint8* dst_v, int width) = RGB565ToUVRow_C;
+ void (*RGB565ToYRow)(const uint8* src_rgb565, uint8* dst_y, int pix) =
+ RGB565ToYRow_C;
+#else
+ void (*RGB565ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) =
+ RGB565ToARGBRow_C;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+#endif
+ if (!src_rgb565 || !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_rgb565 = src_rgb565 + (height - 1) * src_stride_rgb565;
+ src_stride_rgb565 = -src_stride_rgb565;
+ }
+
+// Neon version does direct RGB565 to YUV.
+#if defined(HAS_RGB565TOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ RGB565ToUVRow = RGB565ToUVRow_Any_NEON;
+ RGB565ToYRow = RGB565ToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ RGB565ToYRow = RGB565ToYRow_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ RGB565ToUVRow = RGB565ToUVRow_NEON;
+ }
+ }
+ }
+// Other platforms do intermediate conversion from RGB565 to ARGB.
+#else
+#if defined(HAS_RGB565TOARGBROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_Any_SSE2;
+ if (IS_ALIGNED(width, 8)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_RGB565TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+ {
+ // Allocate 2 rows of ARGB.
+ const int kRowSize = (width * 4 + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+#if defined(HAS_RGB565TOYROW_NEON)
+ RGB565ToUVRow(src_rgb565, src_stride_rgb565, dst_u, dst_v, width);
+ RGB565ToYRow(src_rgb565, dst_y, width);
+ RGB565ToYRow(src_rgb565 + src_stride_rgb565, dst_y + dst_stride_y, width);
+#else
+ RGB565ToARGBRow(src_rgb565, row, width);
+ RGB565ToARGBRow(src_rgb565 + src_stride_rgb565, row + kRowSize, width);
+ ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+ ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
+#endif
+ src_rgb565 += src_stride_rgb565 * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+#if defined(HAS_RGB565TOYROW_NEON)
+ RGB565ToUVRow(src_rgb565, 0, dst_u, dst_v, width);
+ RGB565ToYRow(src_rgb565, dst_y, width);
+#else
+ RGB565ToARGBRow(src_rgb565, row, width);
+ ARGBToUVRow(row, 0, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+#endif
+ }
+#if !defined(HAS_RGB565TOYROW_NEON)
+ free_aligned_buffer_64(row);
+ }
+#endif
+ return 0;
+}
+
+// Convert ARGB1555 to I420.
+LIBYUV_API
+int ARGB1555ToI420(const uint8* src_argb1555, int src_stride_argb1555,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+#if defined(HAS_ARGB1555TOYROW_NEON)
+ void (*ARGB1555ToUVRow)(const uint8* src_argb1555, int src_stride_argb1555,
+ uint8* dst_u, uint8* dst_v, int width) = ARGB1555ToUVRow_C;
+ void (*ARGB1555ToYRow)(const uint8* src_argb1555, uint8* dst_y, int pix) =
+ ARGB1555ToYRow_C;
+#else
+ void (*ARGB1555ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) =
+ ARGB1555ToARGBRow_C;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+#endif
+ if (!src_argb1555 || !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb1555 = src_argb1555 + (height - 1) * src_stride_argb1555;
+ src_stride_argb1555 = -src_stride_argb1555;
+ }
+
+// Neon version does direct ARGB1555 to YUV.
+#if defined(HAS_ARGB1555TOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGB1555ToUVRow = ARGB1555ToUVRow_Any_NEON;
+ ARGB1555ToYRow = ARGB1555ToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGB1555ToYRow = ARGB1555ToYRow_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGB1555ToUVRow = ARGB1555ToUVRow_NEON;
+ }
+ }
+ }
+// Other platforms do intermediate conversion from ARGB1555 to ARGB.
+#else
+#if defined(HAS_ARGB1555TOARGBROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_SSE2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGB1555TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+ {
+ // Allocate 2 rows of ARGB.
+ const int kRowSize = (width * 4 + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+#if defined(HAS_ARGB1555TOYROW_NEON)
+ ARGB1555ToUVRow(src_argb1555, src_stride_argb1555, dst_u, dst_v, width);
+ ARGB1555ToYRow(src_argb1555, dst_y, width);
+ ARGB1555ToYRow(src_argb1555 + src_stride_argb1555, dst_y + dst_stride_y,
+ width);
+#else
+ ARGB1555ToARGBRow(src_argb1555, row, width);
+ ARGB1555ToARGBRow(src_argb1555 + src_stride_argb1555, row + kRowSize,
+ width);
+ ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+ ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
+#endif
+ src_argb1555 += src_stride_argb1555 * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+#if defined(HAS_ARGB1555TOYROW_NEON)
+ ARGB1555ToUVRow(src_argb1555, 0, dst_u, dst_v, width);
+ ARGB1555ToYRow(src_argb1555, dst_y, width);
+#else
+ ARGB1555ToARGBRow(src_argb1555, row, width);
+ ARGBToUVRow(row, 0, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+#endif
+ }
+#if !defined(HAS_ARGB1555TOYROW_NEON)
+ free_aligned_buffer_64(row);
+ }
+#endif
+ return 0;
+}
+
+// Convert ARGB4444 to I420.
+LIBYUV_API
+int ARGB4444ToI420(const uint8* src_argb4444, int src_stride_argb4444,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+#if defined(HAS_ARGB4444TOYROW_NEON)
+ void (*ARGB4444ToUVRow)(const uint8* src_argb4444, int src_stride_argb4444,
+ uint8* dst_u, uint8* dst_v, int width) = ARGB4444ToUVRow_C;
+ void (*ARGB4444ToYRow)(const uint8* src_argb4444, uint8* dst_y, int pix) =
+ ARGB4444ToYRow_C;
+#else
+ void (*ARGB4444ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) =
+ ARGB4444ToARGBRow_C;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+#endif
+ if (!src_argb4444 || !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb4444 = src_argb4444 + (height - 1) * src_stride_argb4444;
+ src_stride_argb4444 = -src_stride_argb4444;
+ }
+
+// Neon version does direct ARGB4444 to YUV.
+#if defined(HAS_ARGB4444TOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGB4444ToUVRow = ARGB4444ToUVRow_Any_NEON;
+ ARGB4444ToYRow = ARGB4444ToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGB4444ToYRow = ARGB4444ToYRow_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGB4444ToUVRow = ARGB4444ToUVRow_NEON;
+ }
+ }
+ }
+// Other platforms do intermediate conversion from ARGB4444 to ARGB.
+#else
+#if defined(HAS_ARGB4444TOARGBROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_SSE2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGB4444TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+ {
+ // Allocate 2 rows of ARGB.
+ const int kRowSize = (width * 4 + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+#if defined(HAS_ARGB4444TOYROW_NEON)
+ ARGB4444ToUVRow(src_argb4444, src_stride_argb4444, dst_u, dst_v, width);
+ ARGB4444ToYRow(src_argb4444, dst_y, width);
+ ARGB4444ToYRow(src_argb4444 + src_stride_argb4444, dst_y + dst_stride_y,
+ width);
+#else
+ ARGB4444ToARGBRow(src_argb4444, row, width);
+ ARGB4444ToARGBRow(src_argb4444 + src_stride_argb4444, row + kRowSize,
+ width);
+ ARGBToUVRow(row, kRowSize, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+ ARGBToYRow(row + kRowSize, dst_y + dst_stride_y, width);
+#endif
+ src_argb4444 += src_stride_argb4444 * 2;
+ dst_y += dst_stride_y * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+#if defined(HAS_ARGB4444TOYROW_NEON)
+ ARGB4444ToUVRow(src_argb4444, 0, dst_u, dst_v, width);
+ ARGB4444ToYRow(src_argb4444, dst_y, width);
+#else
+ ARGB4444ToARGBRow(src_argb4444, row, width);
+ ARGBToUVRow(row, 0, dst_u, dst_v, width);
+ ARGBToYRow(row, dst_y, width);
+#endif
+ }
+#if !defined(HAS_ARGB4444TOYROW_NEON)
+ free_aligned_buffer_64(row);
+ }
+#endif
+ return 0;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/convert_argb.cc b/media/libaom/src/third_party/libyuv/source/convert_argb.cc
new file mode 100644
index 000000000..44756bc41
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/convert_argb.cc
@@ -0,0 +1,1155 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/convert_argb.h"
+
+#include "libyuv/cpu_id.h"
+#ifdef HAVE_JPEG
+#include "libyuv/mjpeg_decoder.h"
+#endif
+#include "libyuv/rotate_argb.h"
+#include "libyuv/row.h"
+#include "libyuv/video_common.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Copy ARGB with optional flipping
+LIBYUV_API
+int ARGBCopy(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ if (!src_argb || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+
+ CopyPlane(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
+ width * 4, height);
+ return 0;
+}
+
+// Convert I444 to ARGB.
+LIBYUV_API
+int I444ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*I444ToARGBRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I444ToARGBRow_C;
+ if (!src_y || !src_u || !src_v ||
+ !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ src_stride_u == width &&
+ src_stride_v == width &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_argb = 0;
+ }
+#if defined(HAS_I444TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I444ToARGBRow = I444ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I444ToARGBRow = I444ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I444TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I444ToARGBRow = I444ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I444ToARGBRow = I444ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I444TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I444ToARGBRow = I444ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I444ToARGBRow = I444ToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I444ToARGBRow(src_y, src_u, src_v, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ return 0;
+}
+
+// Convert I422 to ARGB.
+LIBYUV_API
+int I422ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*I422ToARGBRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToARGBRow_C;
+ if (!src_y || !src_u || !src_v ||
+ !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
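+  // (Chroma is half width in I422, hence the stride * 2 == width tests.)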
+ if (src_stride_y == width &&
+ src_stride_u * 2 == width &&
+ src_stride_v * 2 == width &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_argb = 0;
+ }
+#if defined(HAS_I422TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGBRow = I422ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToARGBRow = I422ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToARGBRow = I422ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToARGBRow = I422ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGBRow = I422ToARGBRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
+ IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
+ I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToARGBRow(src_y, src_u, src_v, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ return 0;
+}
+
+// Convert I411 to ARGB.
+LIBYUV_API
+int I411ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*I411ToARGBRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I411ToARGBRow_C;
+ if (!src_y || !src_u || !src_v ||
+ !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ src_stride_u * 4 == width &&
+ src_stride_v * 4 == width &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_argb = 0;
+ }
+#if defined(HAS_I411TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I411ToARGBRow = I411ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I411ToARGBRow = I411ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I411TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I411ToARGBRow = I411ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I411ToARGBRow = I411ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I411TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I411ToARGBRow = I411ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I411ToARGBRow = I411ToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I411ToARGBRow(src_y, src_u, src_v, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ return 0;
+}
+
+// Convert I400 to ARGB.
+LIBYUV_API
+int I400ToARGB(const uint8* src_y, int src_stride_y,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*I400ToARGBRow)(const uint8* y_buf,
+ uint8* rgb_buf,
+ int width) = I400ToARGBRow_C;
+ if (!src_y || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = dst_stride_argb = 0;
+ }
+#if defined(HAS_I400TOARGBROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ I400ToARGBRow = I400ToARGBRow_Any_SSE2;
+ if (IS_ALIGNED(width, 8)) {
+ I400ToARGBRow = I400ToARGBRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_I400TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I400ToARGBRow = I400ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I400ToARGBRow = I400ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I400TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I400ToARGBRow = I400ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I400ToARGBRow = I400ToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I400ToARGBRow(src_y, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ }
+ return 0;
+}
+
+// Convert J400 to ARGB.
+LIBYUV_API
+int J400ToARGB(const uint8* src_y, int src_stride_y,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*J400ToARGBRow)(const uint8* src_y, uint8* dst_argb, int pix) =
+ J400ToARGBRow_C;
+ if (!src_y || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_stride_y = -src_stride_y;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = dst_stride_argb = 0;
+ }
+#if defined(HAS_J400TOARGBROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ J400ToARGBRow = J400ToARGBRow_Any_SSE2;
+ if (IS_ALIGNED(width, 8)) {
+ J400ToARGBRow = J400ToARGBRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_J400TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ J400ToARGBRow = J400ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ J400ToARGBRow = J400ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_J400TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ J400ToARGBRow = J400ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ J400ToARGBRow = J400ToARGBRow_NEON;
+ }
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ J400ToARGBRow(src_y, dst_argb, width);
+ src_y += src_stride_y;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Shuffle table for converting BGRA to ARGB.
+static uvec8 kShuffleMaskBGRAToARGB = {
+ 3u, 2u, 1u, 0u, 7u, 6u, 5u, 4u, 11u, 10u, 9u, 8u, 15u, 14u, 13u, 12u
+};
+
+// Shuffle table for converting ABGR to ARGB.
+static uvec8 kShuffleMaskABGRToARGB = {
+ 2u, 1u, 0u, 3u, 6u, 5u, 4u, 7u, 10u, 9u, 8u, 11u, 14u, 13u, 12u, 15u
+};
+
+// Shuffle table for converting RGBA to ARGB.
+static uvec8 kShuffleMaskRGBAToARGB = {
+ 1u, 2u, 3u, 0u, 5u, 6u, 7u, 4u, 9u, 10u, 11u, 8u, 13u, 14u, 15u, 12u
+};
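+
+// Each mask entry is the index of the source byte that supplies the
+// corresponding destination byte (a pshufb-style shuffle); e.g. the BGRA
+// mask {3, 2, 1, 0, ...} reverses every 4-byte pixel.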
+
+// Convert BGRA to ARGB.
+LIBYUV_API
+int BGRAToARGB(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ return ARGBShuffle(src_bgra, src_stride_bgra,
+ dst_argb, dst_stride_argb,
+ (const uint8*)(&kShuffleMaskBGRAToARGB),
+ width, height);
+}
+
+// Convert ARGB to BGRA (same as BGRAToARGB).
+LIBYUV_API
+int ARGBToBGRA(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ return ARGBShuffle(src_bgra, src_stride_bgra,
+ dst_argb, dst_stride_argb,
+ (const uint8*)(&kShuffleMaskBGRAToARGB),
+ width, height);
+}
+
+// Convert ABGR to ARGB.
+LIBYUV_API
+int ABGRToARGB(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ return ARGBShuffle(src_abgr, src_stride_abgr,
+ dst_argb, dst_stride_argb,
+ (const uint8*)(&kShuffleMaskABGRToARGB),
+ width, height);
+}
+
+// Convert ARGB to ABGR (same as ABGRToARGB).
+LIBYUV_API
+int ARGBToABGR(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ return ARGBShuffle(src_abgr, src_stride_abgr,
+ dst_argb, dst_stride_argb,
+ (const uint8*)(&kShuffleMaskABGRToARGB),
+ width, height);
+}
+
+// Convert RGBA to ARGB.
+LIBYUV_API
+int RGBAToARGB(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ return ARGBShuffle(src_rgba, src_stride_rgba,
+ dst_argb, dst_stride_argb,
+ (const uint8*)(&kShuffleMaskRGBAToARGB),
+ width, height);
+}
+
+// Convert RGB24 to ARGB.
+LIBYUV_API
+int RGB24ToARGB(const uint8* src_rgb24, int src_stride_rgb24,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*RGB24ToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) =
+ RGB24ToARGBRow_C;
+ if (!src_rgb24 || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_rgb24 = src_rgb24 + (height - 1) * src_stride_rgb24;
+ src_stride_rgb24 = -src_stride_rgb24;
+ }
+ // Coalesce rows.
+ if (src_stride_rgb24 == width * 3 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_rgb24 = dst_stride_argb = 0;
+ }
+#if defined(HAS_RGB24TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ RGB24ToARGBRow = RGB24ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ RGB24ToARGBRow = RGB24ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_RGB24TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ RGB24ToARGBRow = RGB24ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ RGB24ToARGBRow = RGB24ToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ RGB24ToARGBRow(src_rgb24, dst_argb, width);
+ src_rgb24 += src_stride_rgb24;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert RAW to ARGB.
+LIBYUV_API
+int RAWToARGB(const uint8* src_raw, int src_stride_raw,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*RAWToARGBRow)(const uint8* src_rgb, uint8* dst_argb, int pix) =
+ RAWToARGBRow_C;
+ if (!src_raw || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_raw = src_raw + (height - 1) * src_stride_raw;
+ src_stride_raw = -src_stride_raw;
+ }
+ // Coalesce rows.
+ if (src_stride_raw == width * 3 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_raw = dst_stride_argb = 0;
+ }
+#if defined(HAS_RAWTOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ RAWToARGBRow = RAWToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ RAWToARGBRow = RAWToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_RAWTOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ RAWToARGBRow = RAWToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ RAWToARGBRow = RAWToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ RAWToARGBRow(src_raw, dst_argb, width);
+ src_raw += src_stride_raw;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert RGB565 to ARGB.
+LIBYUV_API
+int RGB565ToARGB(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*RGB565ToARGBRow)(const uint8* src_rgb565, uint8* dst_argb, int pix) =
+ RGB565ToARGBRow_C;
+ if (!src_rgb565 || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_rgb565 = src_rgb565 + (height - 1) * src_stride_rgb565;
+ src_stride_rgb565 = -src_stride_rgb565;
+ }
+ // Coalesce rows.
+ if (src_stride_rgb565 == width * 2 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_rgb565 = dst_stride_argb = 0;
+ }
+#if defined(HAS_RGB565TOARGBROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_Any_SSE2;
+ if (IS_ALIGNED(width, 8)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_RGB565TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_RGB565TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ RGB565ToARGBRow = RGB565ToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ RGB565ToARGBRow(src_rgb565, dst_argb, width);
+ src_rgb565 += src_stride_rgb565;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert ARGB1555 to ARGB.
+LIBYUV_API
+int ARGB1555ToARGB(const uint8* src_argb1555, int src_stride_argb1555,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGB1555ToARGBRow)(const uint8* src_argb1555, uint8* dst_argb,
+ int pix) = ARGB1555ToARGBRow_C;
+ if (!src_argb1555 || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb1555 = src_argb1555 + (height - 1) * src_stride_argb1555;
+ src_stride_argb1555 = -src_stride_argb1555;
+ }
+ // Coalesce rows.
+ if (src_stride_argb1555 == width * 2 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb1555 = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGB1555TOARGBROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_SSE2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGB1555TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGB1555TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGB1555ToARGBRow = ARGB1555ToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGB1555ToARGBRow(src_argb1555, dst_argb, width);
+ src_argb1555 += src_stride_argb1555;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert ARGB4444 to ARGB.
+LIBYUV_API
+int ARGB4444ToARGB(const uint8* src_argb4444, int src_stride_argb4444,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGB4444ToARGBRow)(const uint8* src_argb4444, uint8* dst_argb,
+ int pix) = ARGB4444ToARGBRow_C;
+ if (!src_argb4444 || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb4444 = src_argb4444 + (height - 1) * src_stride_argb4444;
+ src_stride_argb4444 = -src_stride_argb4444;
+ }
+ // Coalesce rows.
+ if (src_stride_argb4444 == width * 2 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb4444 = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGB4444TOARGBROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_SSE2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGB4444TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGB4444TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGB4444ToARGBRow = ARGB4444ToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGB4444ToARGBRow(src_argb4444, dst_argb, width);
+ src_argb4444 += src_stride_argb4444;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert NV12 to ARGB.
+LIBYUV_API
+int NV12ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*NV12ToARGBRow)(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* rgb_buf,
+ int width) = NV12ToARGBRow_C;
+ if (!src_y || !src_uv || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+#if defined(HAS_NV12TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ NV12ToARGBRow = NV12ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ NV12ToARGBRow = NV12ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_NV12TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ NV12ToARGBRow = NV12ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ NV12ToARGBRow = NV12ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_NV12TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ NV12ToARGBRow = NV12ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ NV12ToARGBRow = NV12ToARGBRow_NEON;
+ }
+ }
+#endif
+
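+  // Chroma is subsampled 2x vertically, so one UV row serves two Y rows:
+  // advance src_uv only after odd rows.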
+ for (y = 0; y < height; ++y) {
+ NV12ToARGBRow(src_y, src_uv, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_uv += src_stride_uv;
+ }
+ }
+ return 0;
+}
+
+// Convert NV21 to ARGB.
+LIBYUV_API
+int NV21ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*NV21ToARGBRow)(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* rgb_buf,
+ int width) = NV21ToARGBRow_C;
+ if (!src_y || !src_uv || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+#if defined(HAS_NV21TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ NV21ToARGBRow = NV21ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ NV21ToARGBRow = NV21ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_NV21TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ NV21ToARGBRow = NV21ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ NV21ToARGBRow = NV21ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_NV21TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ NV21ToARGBRow = NV21ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ NV21ToARGBRow = NV21ToARGBRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ NV21ToARGBRow(src_y, src_uv, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_uv += src_stride_uv;
+ }
+ }
+ return 0;
+}
+
+// Convert M420 to ARGB.
+LIBYUV_API
+int M420ToARGB(const uint8* src_m420, int src_stride_m420,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*NV12ToARGBRow)(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* rgb_buf,
+ int width) = NV12ToARGBRow_C;
+ if (!src_m420 || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+#if defined(HAS_NV12TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ NV12ToARGBRow = NV12ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ NV12ToARGBRow = NV12ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_NV12TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ NV12ToARGBRow = NV12ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ NV12ToARGBRow = NV12ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_NV12TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ NV12ToARGBRow = NV12ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ NV12ToARGBRow = NV12ToARGBRow_NEON;
+ }
+ }
+#endif
+
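+  // Each M420 group is 2 Y rows followed by 1 interleaved UV row; feed the
+  // NV12 row converter the Y row plus the UV row 2 rows below it.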
+ for (y = 0; y < height - 1; y += 2) {
+ NV12ToARGBRow(src_m420, src_m420 + src_stride_m420 * 2, dst_argb, width);
+ NV12ToARGBRow(src_m420 + src_stride_m420, src_m420 + src_stride_m420 * 2,
+ dst_argb + dst_stride_argb, width);
+ dst_argb += dst_stride_argb * 2;
+ src_m420 += src_stride_m420 * 3;
+ }
+ if (height & 1) {
+ NV12ToARGBRow(src_m420, src_m420 + src_stride_m420 * 2, dst_argb, width);
+ }
+ return 0;
+}
+
+// Convert YUY2 to ARGB.
+LIBYUV_API
+int YUY2ToARGB(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*YUY2ToARGBRow)(const uint8* src_yuy2, uint8* dst_argb, int pix) =
+ YUY2ToARGBRow_C;
+ if (!src_yuy2 || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_yuy2 = src_yuy2 + (height - 1) * src_stride_yuy2;
+ src_stride_yuy2 = -src_stride_yuy2;
+ }
+ // Coalesce rows.
+ if (src_stride_yuy2 == width * 2 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_yuy2 = dst_stride_argb = 0;
+ }
+#if defined(HAS_YUY2TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ YUY2ToARGBRow = YUY2ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ YUY2ToARGBRow = YUY2ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_YUY2TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ YUY2ToARGBRow = YUY2ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ YUY2ToARGBRow = YUY2ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_YUY2TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ YUY2ToARGBRow = YUY2ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ YUY2ToARGBRow = YUY2ToARGBRow_NEON;
+ }
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ YUY2ToARGBRow(src_yuy2, dst_argb, width);
+ src_yuy2 += src_stride_yuy2;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert UYVY to ARGB.
+LIBYUV_API
+int UYVYToARGB(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*UYVYToARGBRow)(const uint8* src_uyvy, uint8* dst_argb, int pix) =
+ UYVYToARGBRow_C;
+ if (!src_uyvy || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_uyvy = src_uyvy + (height - 1) * src_stride_uyvy;
+ src_stride_uyvy = -src_stride_uyvy;
+ }
+ // Coalesce rows.
+ if (src_stride_uyvy == width * 2 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_uyvy = dst_stride_argb = 0;
+ }
+#if defined(HAS_UYVYTOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ UYVYToARGBRow = UYVYToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ UYVYToARGBRow = UYVYToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_UYVYTOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ UYVYToARGBRow = UYVYToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ UYVYToARGBRow = UYVYToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_UYVYTOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ UYVYToARGBRow = UYVYToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ UYVYToARGBRow = UYVYToARGBRow_NEON;
+ }
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ UYVYToARGBRow(src_uyvy, dst_argb, width);
+ src_uyvy += src_stride_uyvy;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert J420 to ARGB.
+LIBYUV_API
+int J420ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*J422ToARGBRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = J422ToARGBRow_C;
+ if (!src_y || !src_u || !src_v || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+#if defined(HAS_J422TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ J422ToARGBRow = J422ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ J422ToARGBRow = J422ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_J422TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ J422ToARGBRow = J422ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ J422ToARGBRow = J422ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_J422TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ J422ToARGBRow = J422ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ J422ToARGBRow = J422ToARGBRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_J422TOARGBROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
+ IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
+ J422ToARGBRow = J422ToARGBRow_MIPS_DSPR2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ J422ToARGBRow(src_y, src_u, src_v, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Convert J422 to ARGB.
+LIBYUV_API
+int J422ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*J422ToARGBRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = J422ToARGBRow_C;
+ if (!src_y || !src_u || !src_v ||
+ !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ src_stride_u * 2 == width &&
+ src_stride_v * 2 == width &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_argb = 0;
+ }
+#if defined(HAS_J422TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ J422ToARGBRow = J422ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ J422ToARGBRow = J422ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_J422TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ J422ToARGBRow = J422ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ J422ToARGBRow = J422ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_J422TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ J422ToARGBRow = J422ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ J422ToARGBRow = J422ToARGBRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_J422TOARGBROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
+ IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
+ J422ToARGBRow = J422ToARGBRow_MIPS_DSPR2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ J422ToARGBRow(src_y, src_u, src_v, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ return 0;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/convert_from.cc b/media/libaom/src/third_party/libyuv/source/convert_from.cc
new file mode 100644
index 000000000..31f1ac992
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/convert_from.cc
@@ -0,0 +1,1348 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/convert_from.h"
+
+#include "libyuv/basic_types.h"
+#include "libyuv/convert.h" // For I420Copy
+#include "libyuv/cpu_id.h"
+#include "libyuv/planar_functions.h"
+#include "libyuv/rotate.h"
+#include "libyuv/scale.h" // For ScalePlane()
+#include "libyuv/video_common.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#define SUBSAMPLE(v, a, s) \
+  (((v) < 0) ? (-((-(v) + (a)) >> (s))) : (((v) + (a)) >> (s)))
+static __inline int Abs(int v) {
+ return v >= 0 ? v : -v;
+}
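+// SUBSAMPLE rounds a dimension up when halving or quartering, preserving
+// the sign of mirrored (negative) dimensions. For example,
+// SUBSAMPLE(5, 1, 1) == 3 and SUBSAMPLE(-5, 1, 1) == -3.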
+
+// I420 To any I4xx YUV format with mirroring.
+static int I420ToI4xx(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int src_y_width, int src_y_height,
+ int dst_uv_width, int dst_uv_height) {
+ const int dst_y_width = Abs(src_y_width);
+ const int dst_y_height = Abs(src_y_height);
+ const int src_uv_width = SUBSAMPLE(src_y_width, 1, 1);
+ const int src_uv_height = SUBSAMPLE(src_y_height, 1, 1);
+ if (src_y_width == 0 || src_y_height == 0 ||
+ dst_uv_width <= 0 || dst_uv_height <= 0) {
+ return -1;
+ }
+ ScalePlane(src_y, src_stride_y, src_y_width, src_y_height,
+ dst_y, dst_stride_y, dst_y_width, dst_y_height,
+ kFilterBilinear);
+ ScalePlane(src_u, src_stride_u, src_uv_width, src_uv_height,
+ dst_u, dst_stride_u, dst_uv_width, dst_uv_height,
+ kFilterBilinear);
+ ScalePlane(src_v, src_stride_v, src_uv_width, src_uv_height,
+ dst_v, dst_stride_v, dst_uv_width, dst_uv_height,
+ kFilterBilinear);
+ return 0;
+}
+
+// 420 chroma is 1/2 width, 1/2 height
+// 422 chroma is 1/2 width, 1x height
+LIBYUV_API
+int I420ToI422(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ const int dst_uv_width = (Abs(width) + 1) >> 1;
+ const int dst_uv_height = Abs(height);
+ return I420ToI4xx(src_y, src_stride_y,
+ src_u, src_stride_u,
+ src_v, src_stride_v,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height,
+ dst_uv_width, dst_uv_height);
+}
+
+// 420 chroma is 1/2 width, 1/2 height
+// 444 chroma is 1x width, 1x height
+LIBYUV_API
+int I420ToI444(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ const int dst_uv_width = Abs(width);
+ const int dst_uv_height = Abs(height);
+ return I420ToI4xx(src_y, src_stride_y,
+ src_u, src_stride_u,
+ src_v, src_stride_v,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height,
+ dst_uv_width, dst_uv_height);
+}
+
+// 420 chroma is 1/2 width, 1/2 height
+// 411 chroma is 1/4 width, 1x height
+LIBYUV_API
+int I420ToI411(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ const int dst_uv_width = (Abs(width) + 3) >> 2;
+ const int dst_uv_height = Abs(height);
+ return I420ToI4xx(src_y, src_stride_y,
+ src_u, src_stride_u,
+ src_v, src_stride_v,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height,
+ dst_uv_width, dst_uv_height);
+}
+
+// Copy to I400. Source can be I420, I422, I444, I400, NV12 or NV21.
+LIBYUV_API
+int I400Copy(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height) {
+ if (!src_y || !dst_y ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_stride_y = -src_stride_y;
+ }
+ CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ return 0;
+}
+
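+// Convert I422 to YUY2.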
+LIBYUV_API
+int I422ToYUY2(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_yuy2, int dst_stride_yuy2,
+ int width, int height) {
+ int y;
+ void (*I422ToYUY2Row)(const uint8* src_y, const uint8* src_u,
+ const uint8* src_v, uint8* dst_yuy2, int width) =
+ I422ToYUY2Row_C;
+ if (!src_y || !src_u || !src_v || !dst_yuy2 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
+ dst_stride_yuy2 = -dst_stride_yuy2;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ src_stride_u * 2 == width &&
+ src_stride_v * 2 == width &&
+ dst_stride_yuy2 == width * 2) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_yuy2 = 0;
+ }
+#if defined(HAS_I422TOYUY2ROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToYUY2Row = I422ToYUY2Row_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOYUY2ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToYUY2Row = I422ToYUY2Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ dst_yuy2 += dst_stride_yuy2;
+ }
+ return 0;
+}
+
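+// Convert I420 to YUY2.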
+LIBYUV_API
+int I420ToYUY2(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_yuy2, int dst_stride_yuy2,
+ int width, int height) {
+ int y;
+ void (*I422ToYUY2Row)(const uint8* src_y, const uint8* src_u,
+ const uint8* src_v, uint8* dst_yuy2, int width) =
+ I422ToYUY2Row_C;
+ if (!src_y || !src_u || !src_v || !dst_yuy2 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
+ dst_stride_yuy2 = -dst_stride_yuy2;
+ }
+#if defined(HAS_I422TOYUY2ROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToYUY2Row = I422ToYUY2Row_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOYUY2ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToYUY2Row = I422ToYUY2Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+ I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
+ I422ToYUY2Row(src_y + src_stride_y, src_u, src_v,
+ dst_yuy2 + dst_stride_yuy2, width);
+ src_y += src_stride_y * 2;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ dst_yuy2 += dst_stride_yuy2 * 2;
+ }
+ if (height & 1) {
+ I422ToYUY2Row(src_y, src_u, src_v, dst_yuy2, width);
+ }
+ return 0;
+}
+
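+// Convert I422 to UYVY.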
+LIBYUV_API
+int I422ToUYVY(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_uyvy, int dst_stride_uyvy,
+ int width, int height) {
+ int y;
+ void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u,
+ const uint8* src_v, uint8* dst_uyvy, int width) =
+ I422ToUYVYRow_C;
+ if (!src_y || !src_u || !src_v || !dst_uyvy ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
+ dst_stride_uyvy = -dst_stride_uyvy;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ src_stride_u * 2 == width &&
+ src_stride_v * 2 == width &&
+ dst_stride_uyvy == width * 2) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_uyvy = 0;
+ }
+#if defined(HAS_I422TOUYVYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToUYVYRow = I422ToUYVYRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOUYVYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToUYVYRow = I422ToUYVYRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ dst_uyvy += dst_stride_uyvy;
+ }
+ return 0;
+}
+
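+// Convert I420 to UYVY.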
+LIBYUV_API
+int I420ToUYVY(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_uyvy, int dst_stride_uyvy,
+ int width, int height) {
+ int y;
+ void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u,
+ const uint8* src_v, uint8* dst_uyvy, int width) =
+ I422ToUYVYRow_C;
+ if (!src_y || !src_u || !src_v || !dst_uyvy ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
+ dst_stride_uyvy = -dst_stride_uyvy;
+ }
+#if defined(HAS_I422TOUYVYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToUYVYRow = I422ToUYVYRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOUYVYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToUYVYRow = I422ToUYVYRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+ I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
+ I422ToUYVYRow(src_y + src_stride_y, src_u, src_v,
+ dst_uyvy + dst_stride_uyvy, width);
+ src_y += src_stride_y * 2;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ dst_uyvy += dst_stride_uyvy * 2;
+ }
+ if (height & 1) {
+ I422ToUYVYRow(src_y, src_u, src_v, dst_uyvy, width);
+ }
+ return 0;
+}
+
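+// Convert I420 to NV12.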
+LIBYUV_API
+int I420ToNV12(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height) {
+ int y;
+ void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) = MergeUVRow_C;
+  int halfwidth = (width + 1) >> 1;
+  int halfheight = (height + 1) >> 1;
+ if (!src_y || !src_u || !src_v || !dst_y || !dst_uv ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ halfheight = (height + 1) >> 1;
+ dst_y = dst_y + (height - 1) * dst_stride_y;
+ dst_uv = dst_uv + (halfheight - 1) * dst_stride_uv;
+ dst_stride_y = -dst_stride_y;
+ dst_stride_uv = -dst_stride_uv;
+ }
+  // Coalesce rows.
+  if (src_stride_y == width &&
+ dst_stride_y == width) {
+ width *= height;
+ height = 1;
+ src_stride_y = dst_stride_y = 0;
+ }
+ // Coalesce rows.
+ if (src_stride_u == halfwidth &&
+ src_stride_v == halfwidth &&
+ dst_stride_uv == halfwidth * 2) {
+ halfwidth *= halfheight;
+ halfheight = 1;
+ src_stride_u = src_stride_v = dst_stride_uv = 0;
+ }
+#if defined(HAS_MERGEUVROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ MergeUVRow_ = MergeUVRow_Any_SSE2;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ MergeUVRow_ = MergeUVRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_MERGEUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ MergeUVRow_ = MergeUVRow_Any_AVX2;
+ if (IS_ALIGNED(halfwidth, 32)) {
+ MergeUVRow_ = MergeUVRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_MERGEUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ MergeUVRow_ = MergeUVRow_Any_NEON;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ MergeUVRow_ = MergeUVRow_NEON;
+ }
+ }
+#endif
+
+ CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ for (y = 0; y < halfheight; ++y) {
+ // Merge a row of U and V into a row of UV.
+ MergeUVRow_(src_u, src_v, dst_uv, halfwidth);
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ dst_uv += dst_stride_uv;
+ }
+ return 0;
+}
+
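+// Convert I420 to NV21 by reusing I420ToNV12 with U and V swapped.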
+LIBYUV_API
+int I420ToNV21(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_vu, int dst_stride_vu,
+ int width, int height) {
+ return I420ToNV12(src_y, src_stride_y,
+ src_v, src_stride_v,
+ src_u, src_stride_u,
+                    dst_y, dst_stride_y,
+ dst_vu, dst_stride_vu,
+ width, height);
+}
+
+// Convert I420 to ARGB.
+LIBYUV_API
+int I420ToARGB(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*I422ToARGBRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToARGBRow_C;
+ if (!src_y || !src_u || !src_v || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+#if defined(HAS_I422TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGBRow = I422ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToARGBRow = I422ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToARGBRow = I422ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToARGBRow = I422ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGBRow = I422ToARGBRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
+ IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
+ I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToARGBRow(src_y, src_u, src_v, dst_argb, width);
+ dst_argb += dst_stride_argb;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Convert I420 to BGRA.
+LIBYUV_API
+int I420ToBGRA(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_bgra, int dst_stride_bgra,
+ int width, int height) {
+ int y;
+ void (*I422ToBGRARow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToBGRARow_C;
+ if (!src_y || !src_u || !src_v || !dst_bgra ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_bgra = dst_bgra + (height - 1) * dst_stride_bgra;
+ dst_stride_bgra = -dst_stride_bgra;
+ }
+#if defined(HAS_I422TOBGRAROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToBGRARow = I422ToBGRARow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToBGRARow = I422ToBGRARow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOBGRAROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToBGRARow = I422ToBGRARow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToBGRARow = I422ToBGRARow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOBGRAROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToBGRARow = I422ToBGRARow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToBGRARow = I422ToBGRARow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_I422TOBGRAROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
+ IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
+ IS_ALIGNED(dst_bgra, 4) && IS_ALIGNED(dst_stride_bgra, 4)) {
+ I422ToBGRARow = I422ToBGRARow_MIPS_DSPR2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToBGRARow(src_y, src_u, src_v, dst_bgra, width);
+ dst_bgra += dst_stride_bgra;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Convert I420 to ABGR.
+LIBYUV_API
+int I420ToABGR(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_abgr, int dst_stride_abgr,
+ int width, int height) {
+ int y;
+ void (*I422ToABGRRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToABGRRow_C;
+ if (!src_y || !src_u || !src_v || !dst_abgr ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_abgr = dst_abgr + (height - 1) * dst_stride_abgr;
+ dst_stride_abgr = -dst_stride_abgr;
+ }
+#if defined(HAS_I422TOABGRROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToABGRRow = I422ToABGRRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToABGRRow = I422ToABGRRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOABGRROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToABGRRow = I422ToABGRRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToABGRRow = I422ToABGRRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOABGRROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToABGRRow = I422ToABGRRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToABGRRow = I422ToABGRRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToABGRRow(src_y, src_u, src_v, dst_abgr, width);
+ dst_abgr += dst_stride_abgr;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Convert I420 to RGBA.
+LIBYUV_API
+int I420ToRGBA(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_rgba, int dst_stride_rgba,
+ int width, int height) {
+ int y;
+ void (*I422ToRGBARow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToRGBARow_C;
+ if (!src_y || !src_u || !src_v || !dst_rgba ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_rgba = dst_rgba + (height - 1) * dst_stride_rgba;
+ dst_stride_rgba = -dst_stride_rgba;
+ }
+#if defined(HAS_I422TORGBAROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToRGBARow = I422ToRGBARow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRGBARow = I422ToRGBARow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TORGBAROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToRGBARow = I422ToRGBARow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToRGBARow = I422ToRGBARow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TORGBAROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToRGBARow = I422ToRGBARow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRGBARow = I422ToRGBARow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToRGBARow(src_y, src_u, src_v, dst_rgba, width);
+ dst_rgba += dst_stride_rgba;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Convert I420 to RGB24.
+LIBYUV_API
+int I420ToRGB24(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_rgb24, int dst_stride_rgb24,
+ int width, int height) {
+ int y;
+ void (*I422ToRGB24Row)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToRGB24Row_C;
+ if (!src_y || !src_u || !src_v || !dst_rgb24 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_rgb24 = dst_rgb24 + (height - 1) * dst_stride_rgb24;
+ dst_stride_rgb24 = -dst_stride_rgb24;
+ }
+#if defined(HAS_I422TORGB24ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToRGB24Row = I422ToRGB24Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRGB24Row = I422ToRGB24Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TORGB24ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToRGB24Row = I422ToRGB24Row_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToRGB24Row = I422ToRGB24Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TORGB24ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToRGB24Row = I422ToRGB24Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRGB24Row = I422ToRGB24Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToRGB24Row(src_y, src_u, src_v, dst_rgb24, width);
+ dst_rgb24 += dst_stride_rgb24;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Convert I420 to RAW.
+LIBYUV_API
+int I420ToRAW(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_raw, int dst_stride_raw,
+ int width, int height) {
+ int y;
+ void (*I422ToRAWRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToRAWRow_C;
+ if (!src_y || !src_u || !src_v || !dst_raw ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_raw = dst_raw + (height - 1) * dst_stride_raw;
+ dst_stride_raw = -dst_stride_raw;
+ }
+#if defined(HAS_I422TORAWROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToRAWRow = I422ToRAWRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRAWRow = I422ToRAWRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TORAWROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToRAWRow = I422ToRAWRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToRAWRow = I422ToRAWRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TORAWROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToRAWRow = I422ToRAWRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRAWRow = I422ToRAWRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToRAWRow(src_y, src_u, src_v, dst_raw, width);
+ dst_raw += dst_stride_raw;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Convert I420 to ARGB1555.
+LIBYUV_API
+int I420ToARGB1555(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb1555, int dst_stride_argb1555,
+ int width, int height) {
+ int y;
+ void (*I422ToARGB1555Row)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToARGB1555Row_C;
+ if (!src_y || !src_u || !src_v || !dst_argb1555 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb1555 = dst_argb1555 + (height - 1) * dst_stride_argb1555;
+ dst_stride_argb1555 = -dst_stride_argb1555;
+ }
+#if defined(HAS_I422TOARGB1555ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToARGB1555Row = I422ToARGB1555Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGB1555Row = I422ToARGB1555Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGB1555ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToARGB1555Row = I422ToARGB1555Row_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToARGB1555Row = I422ToARGB1555Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGB1555ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToARGB1555Row = I422ToARGB1555Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGB1555Row = I422ToARGB1555Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToARGB1555Row(src_y, src_u, src_v, dst_argb1555, width);
+ dst_argb1555 += dst_stride_argb1555;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+
+// Convert I420 to ARGB4444.
+LIBYUV_API
+int I420ToARGB4444(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_argb4444, int dst_stride_argb4444,
+ int width, int height) {
+ int y;
+ void (*I422ToARGB4444Row)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToARGB4444Row_C;
+ if (!src_y || !src_u || !src_v || !dst_argb4444 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb4444 = dst_argb4444 + (height - 1) * dst_stride_argb4444;
+ dst_stride_argb4444 = -dst_stride_argb4444;
+ }
+#if defined(HAS_I422TOARGB4444ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToARGB4444Row = I422ToARGB4444Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGB4444Row = I422ToARGB4444Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGB4444ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToARGB4444Row = I422ToARGB4444Row_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToARGB4444Row = I422ToARGB4444Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGB4444ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToARGB4444Row = I422ToARGB4444Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGB4444Row = I422ToARGB4444Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToARGB4444Row(src_y, src_u, src_v, dst_argb4444, width);
+ dst_argb4444 += dst_stride_argb4444;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Convert I420 to RGB565.
+LIBYUV_API
+int I420ToRGB565(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ int width, int height) {
+ int y;
+ void (*I422ToRGB565Row)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToRGB565Row_C;
+ if (!src_y || !src_u || !src_v || !dst_rgb565 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_rgb565 = dst_rgb565 + (height - 1) * dst_stride_rgb565;
+ dst_stride_rgb565 = -dst_stride_rgb565;
+ }
+#if defined(HAS_I422TORGB565ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToRGB565Row = I422ToRGB565Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRGB565Row = I422ToRGB565Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TORGB565ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToRGB565Row = I422ToRGB565Row_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToRGB565Row = I422ToRGB565Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TORGB565ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToRGB565Row = I422ToRGB565Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRGB565Row = I422ToRGB565Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToRGB565Row(src_y, src_u, src_v, dst_rgb565, width);
+ dst_rgb565 += dst_stride_rgb565;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ return 0;
+}
+
+// Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
+static const uint8 kDither565_4x4[16] = {
+ 0, 4, 1, 5,
+ 6, 2, 7, 3,
+ 1, 5, 0, 4,
+ 7, 3, 6, 2,
+};
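+// Each output row uses one 4-byte row of this table, selected by (y & 3)
+// and loaded as a packed uint32 in I420ToRGB565Dither below.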
+
+// Convert I420 to RGB565 with dithering.
+LIBYUV_API
+int I420ToRGB565Dither(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ const uint8* dither4x4, int width, int height) {
+ int y;
+ void (*I422ToARGBRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToARGBRow_C;
+ void (*ARGBToRGB565DitherRow)(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix) = ARGBToRGB565DitherRow_C;
+ if (!src_y || !src_u || !src_v || !dst_rgb565 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_rgb565 = dst_rgb565 + (height - 1) * dst_stride_rgb565;
+ dst_stride_rgb565 = -dst_stride_rgb565;
+ }
+ if (!dither4x4) {
+ dither4x4 = kDither565_4x4;
+ }
+#if defined(HAS_I422TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGBRow = I422ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToARGBRow = I422ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToARGBRow = I422ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToARGBRow = I422ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToARGBRow = I422ToARGBRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
+ IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2)) {
+ I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
+ }
+#endif
+#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
+ }
+ }
+#endif
+ {
+ // Allocate a row of argb.
+ align_buffer_64(row_argb, width * 4);
+ for (y = 0; y < height; ++y) {
+ I422ToARGBRow(src_y, src_u, src_v, row_argb, width);
+ ARGBToRGB565DitherRow(row_argb, dst_rgb565,
+ *(uint32*)(dither4x4 + ((y & 3) << 2)), width);
+ dst_rgb565 += dst_stride_rgb565;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ }
+ free_aligned_buffer_64(row_argb);
+ }
+ return 0;
+}
+
+// Convert I420 to specified format
+LIBYUV_API
+int ConvertFromI420(const uint8* y, int y_stride,
+ const uint8* u, int u_stride,
+ const uint8* v, int v_stride,
+ uint8* dst_sample, int dst_sample_stride,
+ int width, int height,
+ uint32 fourcc) {
+ uint32 format = CanonicalFourCC(fourcc);
+ int r = 0;
+  if (!y || !u || !v || !dst_sample ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
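+  // A dst_sample_stride of 0 selects the minimal stride for each format:
+  // bytes per pixel times width for packed formats, width for planar.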
+ switch (format) {
+ // Single plane formats
+ case FOURCC_YUY2:
+ r = I420ToYUY2(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 2,
+ width, height);
+ break;
+ case FOURCC_UYVY:
+ r = I420ToUYVY(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 2,
+ width, height);
+ break;
+ case FOURCC_RGBP:
+ r = I420ToRGB565(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 2,
+ width, height);
+ break;
+ case FOURCC_RGBO:
+ r = I420ToARGB1555(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 2,
+ width, height);
+ break;
+ case FOURCC_R444:
+ r = I420ToARGB4444(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 2,
+ width, height);
+ break;
+ case FOURCC_24BG:
+ r = I420ToRGB24(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 3,
+ width, height);
+ break;
+ case FOURCC_RAW:
+ r = I420ToRAW(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 3,
+ width, height);
+ break;
+ case FOURCC_ARGB:
+ r = I420ToARGB(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 4,
+ width, height);
+ break;
+ case FOURCC_BGRA:
+ r = I420ToBGRA(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 4,
+ width, height);
+ break;
+ case FOURCC_ABGR:
+ r = I420ToABGR(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 4,
+ width, height);
+ break;
+ case FOURCC_RGBA:
+ r = I420ToRGBA(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width * 4,
+ width, height);
+ break;
+ case FOURCC_I400:
+ r = I400Copy(y, y_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width,
+ width, height);
+ break;
+ case FOURCC_NV12: {
+ uint8* dst_uv = dst_sample + width * height;
+ r = I420ToNV12(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width,
+ dst_uv,
+ dst_sample_stride ? dst_sample_stride : width,
+ width, height);
+ break;
+ }
+ case FOURCC_NV21: {
+ uint8* dst_vu = dst_sample + width * height;
+ r = I420ToNV21(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample,
+ dst_sample_stride ? dst_sample_stride : width,
+ dst_vu,
+ dst_sample_stride ? dst_sample_stride : width,
+ width, height);
+ break;
+ }
+ // TODO(fbarchard): Add M420.
+ // Triplanar formats
+ // TODO(fbarchard): halfstride instead of halfwidth
+ case FOURCC_I420:
+ case FOURCC_YU12:
+ case FOURCC_YV12: {
+ int halfwidth = (width + 1) / 2;
+ int halfheight = (height + 1) / 2;
+ uint8* dst_u;
+ uint8* dst_v;
+ if (format == FOURCC_YV12) {
+ dst_v = dst_sample + width * height;
+ dst_u = dst_v + halfwidth * halfheight;
+ } else {
+ dst_u = dst_sample + width * height;
+ dst_v = dst_u + halfwidth * halfheight;
+ }
+ r = I420Copy(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample, width,
+ dst_u, halfwidth,
+ dst_v, halfwidth,
+ width, height);
+ break;
+ }
+ case FOURCC_I422:
+ case FOURCC_YV16: {
+ int halfwidth = (width + 1) / 2;
+ uint8* dst_u;
+ uint8* dst_v;
+ if (format == FOURCC_YV16) {
+ dst_v = dst_sample + width * height;
+ dst_u = dst_v + halfwidth * height;
+ } else {
+ dst_u = dst_sample + width * height;
+ dst_v = dst_u + halfwidth * height;
+ }
+ r = I420ToI422(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample, width,
+ dst_u, halfwidth,
+ dst_v, halfwidth,
+ width, height);
+ break;
+ }
+ case FOURCC_I444:
+ case FOURCC_YV24: {
+ uint8* dst_u;
+ uint8* dst_v;
+ if (format == FOURCC_YV24) {
+ dst_v = dst_sample + width * height;
+ dst_u = dst_v + width * height;
+ } else {
+ dst_u = dst_sample + width * height;
+ dst_v = dst_u + width * height;
+ }
+ r = I420ToI444(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample, width,
+ dst_u, width,
+ dst_v, width,
+ width, height);
+ break;
+ }
+ case FOURCC_I411: {
+ int quarterwidth = (width + 3) / 4;
+ uint8* dst_u = dst_sample + width * height;
+ uint8* dst_v = dst_u + quarterwidth * height;
+ r = I420ToI411(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ dst_sample, width,
+ dst_u, quarterwidth,
+ dst_v, quarterwidth,
+ width, height);
+ break;
+ }
+
+    // Formats not supported - MJPG, biplanar, some RGB formats.
+ default:
+ return -1; // unknown fourcc - return failure code.
+ }
+ return r;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/convert_from_argb.cc b/media/libaom/src/third_party/libyuv/source/convert_from_argb.cc
new file mode 100644
index 000000000..8d1e97aec
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/convert_from_argb.cc
@@ -0,0 +1,1301 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/convert_from_argb.h"
+
+#include "libyuv/basic_types.h"
+#include "libyuv/cpu_id.h"
+#include "libyuv/planar_functions.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// ARGB little endian (bgra in memory) to I444
+LIBYUV_API
+int ARGBToI444(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ void (*ARGBToUV444Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) = ARGBToUV444Row_C;
+ if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
+ return -1;
+ }
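+  // Negative height means invert the image.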
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_y == width &&
+ dst_stride_u == width &&
+ dst_stride_v == width) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
+ }
+#if defined(HAS_ARGBTOUV444ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUV444Row = ARGBToUV444Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUV444Row = ARGBToUV444Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUV444ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUV444Row = ARGBToUV444Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToUV444Row = ARGBToUV444Row_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToUV444Row(src_argb, dst_u, dst_v, width);
+ ARGBToYRow(src_argb, dst_y, width);
+ src_argb += src_stride_argb;
+ dst_y += dst_stride_y;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ return 0;
+}
+
+// ARGB little endian (bgra in memory) to I422
+LIBYUV_API
+int ARGBToI422(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) = ARGBToUV422Row_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
+ return -1;
+ }
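+  // Negative height means invert the image.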
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_y == width &&
+ dst_stride_u * 2 == width &&
+ dst_stride_v * 2 == width) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
+ }
+#if defined(HAS_ARGBTOUV422ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUV422Row = ARGBToUV422Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUV422ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUV422Row = ARGBToUV422Row_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToUV422Row(src_argb, dst_u, dst_v, width);
+ ARGBToYRow(src_argb, dst_y, width);
+ src_argb += src_stride_argb;
+ dst_y += dst_stride_y;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ return 0;
+}
+
+// ARGB little endian (bgra in memory) to I411
+LIBYUV_API
+int ARGBToI411(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*ARGBToUV411Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) = ARGBToUV411Row_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
+ return -1;
+ }
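+  // Negative height means invert the image.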
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_y == width &&
+ dst_stride_u * 4 == width &&
+ dst_stride_v * 4 == width) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
+ }
+#if defined(HAS_ARGBTOYROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUV411ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUV411Row = ARGBToUV411Row_Any_NEON;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUV411Row = ARGBToUV411Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToUV411Row(src_argb, dst_u, dst_v, width);
+ ARGBToYRow(src_argb, dst_y, width);
+ src_argb += src_stride_argb;
+ dst_y += dst_stride_y;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ return 0;
+}
+
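+// Convert ARGB to NV12.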
+LIBYUV_API
+int ARGBToNV12(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height) {
+ int y;
+ int halfwidth = (width + 1) >> 1;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) = MergeUVRow_C;
+ if (!src_argb ||
+ !dst_y || !dst_uv ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVRow = ARGBToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_MERGEUVROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ MergeUVRow_ = MergeUVRow_Any_SSE2;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ MergeUVRow_ = MergeUVRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_MERGEUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ MergeUVRow_ = MergeUVRow_Any_AVX2;
+ if (IS_ALIGNED(halfwidth, 32)) {
+ MergeUVRow_ = MergeUVRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_MERGEUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ MergeUVRow_ = MergeUVRow_Any_NEON;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ MergeUVRow_ = MergeUVRow_NEON;
+ }
+ }
+#endif
+ {
+    // Allocate 2 rows of uv.
+ align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
+ uint8* row_v = row_u + ((halfwidth + 31) & ~31);
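+    // halfwidth is rounded up to a multiple of 32 so that row_v starts on
+    // a 32-byte boundary within the 64-byte-aligned allocation.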
+
+ for (y = 0; y < height - 1; y += 2) {
+ ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
+ MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
+ ARGBToYRow(src_argb, dst_y, width);
+ ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
+ src_argb += src_stride_argb * 2;
+ dst_y += dst_stride_y * 2;
+ dst_uv += dst_stride_uv;
+ }
+ if (height & 1) {
+ ARGBToUVRow(src_argb, 0, row_u, row_v, width);
+ MergeUVRow_(row_u, row_v, dst_uv, halfwidth);
+ ARGBToYRow(src_argb, dst_y, width);
+ }
+ free_aligned_buffer_64(row_u);
+ }
+ return 0;
+}
+
+// Same as NV12 but U and V swapped.
+LIBYUV_API
+int ARGBToNV21(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height) {
+ int y;
+ int halfwidth = (width + 1) >> 1;
+ void (*ARGBToUVRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVRow_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ void (*MergeUVRow_)(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) = MergeUVRow_C;
+ if (!src_argb ||
+ !dst_y || !dst_uv ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+#if defined(HAS_ARGBTOYROW_SSSE3) && defined(HAS_ARGBTOUVROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVRow = ARGBToUVRow_Any_SSSE3;
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_SSSE3;
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2) && defined(HAS_ARGBTOUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToUVRow = ARGBToUVRow_Any_AVX2;
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToUVRow = ARGBToUVRow_AVX2;
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVRow = ARGBToUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVRow = ARGBToUVRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_MERGEUVROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ MergeUVRow_ = MergeUVRow_Any_SSE2;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ MergeUVRow_ = MergeUVRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_MERGEUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ MergeUVRow_ = MergeUVRow_Any_AVX2;
+ if (IS_ALIGNED(halfwidth, 32)) {
+ MergeUVRow_ = MergeUVRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_MERGEUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ MergeUVRow_ = MergeUVRow_Any_NEON;
+ if (IS_ALIGNED(halfwidth, 16)) {
+ MergeUVRow_ = MergeUVRow_NEON;
+ }
+ }
+#endif
+ {
+    // Allocate 2 rows of uv.
+ align_buffer_64(row_u, ((halfwidth + 31) & ~31) * 2);
+ uint8* row_v = row_u + ((halfwidth + 31) & ~31);
+
+ for (y = 0; y < height - 1; y += 2) {
+ ARGBToUVRow(src_argb, src_stride_argb, row_u, row_v, width);
+ MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
+ ARGBToYRow(src_argb, dst_y, width);
+ ARGBToYRow(src_argb + src_stride_argb, dst_y + dst_stride_y, width);
+ src_argb += src_stride_argb * 2;
+ dst_y += dst_stride_y * 2;
+ dst_uv += dst_stride_uv;
+ }
+ if (height & 1) {
+ ARGBToUVRow(src_argb, 0, row_u, row_v, width);
+ MergeUVRow_(row_v, row_u, dst_uv, halfwidth);
+ ARGBToYRow(src_argb, dst_y, width);
+ }
+ free_aligned_buffer_64(row_u);
+ }
+ return 0;
+}
+
+// Convert ARGB to YUY2.
+LIBYUV_API
+int ARGBToYUY2(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_yuy2, int dst_stride_yuy2,
+ int width, int height) {
+ int y;
+ void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) = ARGBToUV422Row_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ void (*I422ToYUY2Row)(const uint8* src_y, const uint8* src_u,
+ const uint8* src_v, uint8* dst_yuy2, int width) = I422ToYUY2Row_C;
+
+ if (!src_argb || !dst_yuy2 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_yuy2 = dst_yuy2 + (height - 1) * dst_stride_yuy2;
+ dst_stride_yuy2 = -dst_stride_yuy2;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_yuy2 == width * 2) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_yuy2 = 0;
+ }
+#if defined(HAS_ARGBTOUV422ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUV422Row = ARGBToUV422Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUV422ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUV422Row = ARGBToUV422Row_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+
+#if defined(HAS_I422TOYUY2ROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ I422ToYUY2Row = I422ToYUY2Row_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToYUY2Row = I422ToYUY2Row_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOYUY2ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToYUY2Row = I422ToYUY2Row_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToYUY2Row = I422ToYUY2Row_NEON;
+ }
+ }
+#endif
+
+ {
+ // Allocate temporary rows for y, u and v.
+ align_buffer_64(row_y, ((width + 63) & ~63) * 2);
+ uint8* row_u = row_y + ((width + 63) & ~63);
+ uint8* row_v = row_u + ((width + 63) & ~63) / 2;
+
+ for (y = 0; y < height; ++y) {
+ ARGBToUV422Row(src_argb, row_u, row_v, width);
+ ARGBToYRow(src_argb, row_y, width);
+ I422ToYUY2Row(row_y, row_u, row_v, dst_yuy2, width);
+ src_argb += src_stride_argb;
+ dst_yuy2 += dst_stride_yuy2;
+ }
+
+ free_aligned_buffer_64(row_y);
+ }
+ return 0;
+}
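+
+// For reference, YUY2 packs each pair of pixels into four bytes as
+// {Y0, U, Y1, V}. A scalar sketch of the packing the row function performs,
+// assuming the behavior of the C fallback:
+//
+//   for (x = 0; x < width - 1; x += 2) {
+//     dst_yuy2[0] = src_y[0];
+//     dst_yuy2[1] = src_u[0];
+//     dst_yuy2[2] = src_y[1];
+//     dst_yuy2[3] = src_v[0];
+//     src_y += 2;
+//     src_u += 1;
+//     src_v += 1;
+//     dst_yuy2 += 4;
+//   }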
+
+// Convert ARGB to UYVY.
+LIBYUV_API
+int ARGBToUYVY(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_uyvy, int dst_stride_uyvy,
+ int width, int height) {
+ int y;
+ void (*ARGBToUV422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) = ARGBToUV422Row_C;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ void (*I422ToUYVYRow)(const uint8* src_y, const uint8* src_u,
+ const uint8* src_v, uint8* dst_uyvy, int width) = I422ToUYVYRow_C;
+
+ if (!src_argb || !dst_uyvy ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_uyvy = dst_uyvy + (height - 1) * dst_stride_uyvy;
+ dst_stride_uyvy = -dst_stride_uyvy;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_uyvy == width * 2) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_uyvy = 0;
+ }
+#if defined(HAS_ARGBTOUV422ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUV422Row = ARGBToUV422Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUV422Row = ARGBToUV422Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUV422ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUV422Row = ARGBToUV422Row_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUV422Row = ARGBToUV422Row_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+
+#if defined(HAS_I422TOUYVYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ I422ToUYVYRow = I422ToUYVYRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToUYVYRow = I422ToUYVYRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOUYVYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToUYVYRow = I422ToUYVYRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToUYVYRow = I422ToUYVYRow_NEON;
+ }
+ }
+#endif
+
+ {
+ // Allocate temporary rows for y, u and v.
+ align_buffer_64(row_y, ((width + 63) & ~63) * 2);
+ uint8* row_u = row_y + ((width + 63) & ~63);
+ uint8* row_v = row_u + ((width + 63) & ~63) / 2;
+
+ for (y = 0; y < height; ++y) {
+ ARGBToUV422Row(src_argb, row_u, row_v, width);
+ ARGBToYRow(src_argb, row_y, width);
+ I422ToUYVYRow(row_y, row_u, row_v, dst_uyvy, width);
+ src_argb += src_stride_argb;
+ dst_uyvy += dst_stride_uyvy;
+ }
+
+ free_aligned_buffer_64(row_y);
+ }
+ return 0;
+}
+
+// Convert ARGB to I400.
+LIBYUV_API
+int ARGBToI400(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height) {
+ int y;
+ void (*ARGBToYRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYRow_C;
+ if (!src_argb || !dst_y || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_y == width) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_y = 0;
+ }
+#if defined(HAS_ARGBTOYROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYRow = ARGBToYRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYRow = ARGBToYRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYRow = ARGBToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYRow = ARGBToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYRow = ARGBToYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYRow = ARGBToYRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToYRow(src_argb, dst_y, width);
+ src_argb += src_stride_argb;
+ dst_y += dst_stride_y;
+ }
+ return 0;
+}
+
+// Shuffle table for converting ARGB to RGBA.
+static uvec8 kShuffleMaskARGBToRGBA = {
+ 3u, 0u, 1u, 2u, 7u, 4u, 5u, 6u, 11u, 8u, 9u, 10u, 15u, 12u, 13u, 14u
+};
+
+// Convert ARGB to RGBA.
+LIBYUV_API
+int ARGBToRGBA(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgba, int dst_stride_rgba,
+ int width, int height) {
+ return ARGBShuffle(src_argb, src_stride_argb,
+ dst_rgba, dst_stride_rgba,
+ (const uint8*)(&kShuffleMaskARGBToRGBA),
+ width, height);
+}
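+
+// The shuffle mask above is a byte-gather table: within each 16-byte group
+// of pixels, output byte i is taken from input byte mask[i], so {3, 0, 1, 2}
+// moves the alpha byte in front of the B, G, R bytes of every pixel. A
+// scalar sketch of what ARGBShuffle applies to each 16-byte block:
+//
+//   for (i = 0; i < 16; ++i) {
+//     dst[i] = src[kShuffleMaskARGBToRGBA[i]];
+//   }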
+
+// Convert ARGB to RGB24.
+LIBYUV_API
+int ARGBToRGB24(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgb24, int dst_stride_rgb24,
+ int width, int height) {
+ int y;
+ void (*ARGBToRGB24Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
+ ARGBToRGB24Row_C;
+ if (!src_argb || !dst_rgb24 || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_rgb24 == width * 3) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_rgb24 = 0;
+ }
+#if defined(HAS_ARGBTORGB24ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToRGB24Row = ARGBToRGB24Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToRGB24Row = ARGBToRGB24Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTORGB24ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToRGB24Row = ARGBToRGB24Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToRGB24Row = ARGBToRGB24Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToRGB24Row(src_argb, dst_rgb24, width);
+ src_argb += src_stride_argb;
+ dst_rgb24 += dst_stride_rgb24;
+ }
+ return 0;
+}
+
+// Convert ARGB to RAW.
+LIBYUV_API
+int ARGBToRAW(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_raw, int dst_stride_raw,
+ int width, int height) {
+ int y;
+ void (*ARGBToRAWRow)(const uint8* src_argb, uint8* dst_rgb, int pix) =
+ ARGBToRAWRow_C;
+ if (!src_argb || !dst_raw || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_raw == width * 3) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_raw = 0;
+ }
+#if defined(HAS_ARGBTORAWROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToRAWRow = ARGBToRAWRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToRAWRow = ARGBToRAWRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTORAWROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToRAWRow = ARGBToRAWRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToRAWRow = ARGBToRAWRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToRAWRow(src_argb, dst_raw, width);
+ src_argb += src_stride_argb;
+ dst_raw += dst_stride_raw;
+ }
+ return 0;
+}
+
+// Ordered 4x4 dither for 888 to 565. Values from 0 to 7.
+static const uint8 kDither565_4x4[16] = {
+ 0, 4, 1, 5,
+ 6, 2, 7, 3,
+ 1, 5, 0, 4,
+ 7, 3, 6, 2,
+};
+
+// Convert ARGB to RGB565 with 4x4 dither matrix (16 bytes).
+LIBYUV_API
+int ARGBToRGB565Dither(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ const uint8* dither4x4, int width, int height) {
+ int y;
+ void (*ARGBToRGB565DitherRow)(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix) = ARGBToRGB565DitherRow_C;
+ if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ if (!dither4x4) {
+ dither4x4 = kDither565_4x4;
+ }
+#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToRGB565DitherRow = ARGBToRGB565DitherRow_NEON;
+ }
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ ARGBToRGB565DitherRow(src_argb, dst_rgb565,
+ *(uint32*)(dither4x4 + ((y & 3) << 2)), width);
+ src_argb += src_stride_argb;
+ dst_rgb565 += dst_stride_rgb565;
+ }
+ return 0;
+}
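+
+// Each scanline selects one 4-byte row of the 4x4 matrix via
+// (y & 3) << 2, and the row function adds the per-pixel dither value to
+// each channel before truncating to 5/6/5 bits. A scalar sketch of one
+// pixel, assuming the clamp-then-truncate behavior of the C fallback:
+//
+//   int d = (dither4 >> ((x & 3) * 8)) & 0xff;  // 0..7
+//   uint8 b = clamp255(src_argb[0] + d) >> 3;   // 5 bits
+//   uint8 g = clamp255(src_argb[1] + d) >> 2;   // 6 bits
+//   uint8 r = clamp255(src_argb[2] + d) >> 3;   // 5 bits
+//   *(uint16*)(dst_rgb) = b | (g << 5) | (r << 11);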
+
+// Convert ARGB to RGB565.
+// TODO(fbarchard): Consider reusing the low-level dither path with a zero
+// dither matrix instead of a separate non-dithered row function.
+LIBYUV_API
+int ARGBToRGB565(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ int width, int height) {
+ int y;
+ void (*ARGBToRGB565Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
+ ARGBToRGB565Row_C;
+ if (!src_argb || !dst_rgb565 || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_rgb565 == width * 2) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_rgb565 = 0;
+ }
+#if defined(HAS_ARGBTORGB565ROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBToRGB565Row = ARGBToRGB565Row_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBToRGB565Row = ARGBToRGB565Row_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTORGB565ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToRGB565Row = ARGBToRGB565Row_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToRGB565Row = ARGBToRGB565Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTORGB565ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToRGB565Row = ARGBToRGB565Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToRGB565Row = ARGBToRGB565Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToRGB565Row(src_argb, dst_rgb565, width);
+ src_argb += src_stride_argb;
+ dst_rgb565 += dst_stride_rgb565;
+ }
+ return 0;
+}
+
+// Convert ARGB to ARGB1555.
+LIBYUV_API
+int ARGBToARGB1555(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb1555, int dst_stride_argb1555,
+ int width, int height) {
+ int y;
+ void (*ARGBToARGB1555Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
+ ARGBToARGB1555Row_C;
+ if (!src_argb || !dst_argb1555 || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb1555 == width * 2) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb1555 = 0;
+ }
+#if defined(HAS_ARGBTOARGB1555ROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBToARGB1555Row = ARGBToARGB1555Row_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBToARGB1555Row = ARGBToARGB1555Row_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOARGB1555ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToARGB1555Row = ARGBToARGB1555Row_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToARGB1555Row = ARGBToARGB1555Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOARGB1555ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToARGB1555Row = ARGBToARGB1555Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToARGB1555Row = ARGBToARGB1555Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToARGB1555Row(src_argb, dst_argb1555, width);
+ src_argb += src_stride_argb;
+ dst_argb1555 += dst_stride_argb1555;
+ }
+ return 0;
+}
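+
+// For reference, ARGB1555 packs one pixel into 16 bits as one alpha bit and
+// 5 bits each of red, green and blue:
+//
+//   *(uint16*)(dst) = (b >> 3) | ((g >> 3) << 5) | ((r >> 3) << 10) |
+//                     ((a >> 7) << 15);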
+
+// Convert ARGB to ARGB4444.
+LIBYUV_API
+int ARGBToARGB4444(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb4444, int dst_stride_argb4444,
+ int width, int height) {
+ int y;
+ void (*ARGBToARGB4444Row)(const uint8* src_argb, uint8* dst_rgb, int pix) =
+ ARGBToARGB4444Row_C;
+ if (!src_argb || !dst_argb4444 || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb4444 == width * 2) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb4444 = 0;
+ }
+#if defined(HAS_ARGBTOARGB4444ROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBToARGB4444Row = ARGBToARGB4444Row_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBToARGB4444Row = ARGBToARGB4444Row_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToARGB4444Row = ARGBToARGB4444Row_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToARGB4444Row = ARGBToARGB4444Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOARGB4444ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToARGB4444Row = ARGBToARGB4444Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToARGB4444Row = ARGBToARGB4444Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToARGB4444Row(src_argb, dst_argb4444, width);
+ src_argb += src_stride_argb;
+ dst_argb4444 += dst_stride_argb4444;
+ }
+ return 0;
+}
+
+// Convert ARGB to J420 (JPEG full-range I420).
+LIBYUV_API
+int ARGBToJ420(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_yj, int dst_stride_yj,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*ARGBToUVJRow)(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) = ARGBToUVJRow_C;
+ void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int pix) =
+ ARGBToYJRow_C;
+ if (!src_argb ||
+ !dst_yj || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+#if defined(HAS_ARGBTOYJROW_SSSE3) && defined(HAS_ARGBTOUVJROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVJRow = ARGBToUVJRow_Any_SSSE3;
+ ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVJRow = ARGBToUVJRow_SSSE3;
+ ARGBToYJRow = ARGBToYJRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYJROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYJRow = ARGBToYJRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYJRow = ARGBToYJRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYJROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYJRow = ARGBToYJRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYJRow = ARGBToYJRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUVJROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVJRow = ARGBToUVJRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVJRow = ARGBToUVJRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height - 1; y += 2) {
+ ARGBToUVJRow(src_argb, src_stride_argb, dst_u, dst_v, width);
+ ARGBToYJRow(src_argb, dst_yj, width);
+ ARGBToYJRow(src_argb + src_stride_argb, dst_yj + dst_stride_yj, width);
+ src_argb += src_stride_argb * 2;
+ dst_yj += dst_stride_yj * 2;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ if (height & 1) {
+ ARGBToUVJRow(src_argb, 0, dst_u, dst_v, width);
+ ARGBToYJRow(src_argb, dst_yj, width);
+ }
+ return 0;
+}
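+
+// J420 differs from I420 in using full-range JPEG luma: Y spans 0..255 with
+// no +16 offset. One common integer approximation of
+// Y = 0.299 R + 0.587 G + 0.114 B (a sketch; not necessarily the exact
+// coefficients used by the row functions above):
+//
+//   yj = (77 * r + 150 * g + 29 * b + 128) >> 8;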
+
+// Convert ARGB (little endian, BGRA in memory) to J422 (JPEG full-range I422).
+LIBYUV_API
+int ARGBToJ422(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*ARGBToUVJ422Row)(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) = ARGBToUVJ422Row_C;
+ void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_y, int pix) =
+ ARGBToYJRow_C;
+ if (!src_argb || !dst_y || !dst_u || !dst_v || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_y == width &&
+ dst_stride_u * 2 == width &&
+ dst_stride_v * 2 == width) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_y = dst_stride_u = dst_stride_v = 0;
+ }
+#if defined(HAS_ARGBTOUVJ422ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToUVJ422Row = ARGBToUVJ422Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVJ422Row = ARGBToUVJ422Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOUVJ422ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToUVJ422Row = ARGBToUVJ422Row_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToUVJ422Row = ARGBToUVJ422Row_NEON;
+ }
+ }
+#endif
+
+#if defined(HAS_ARGBTOYJROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYJRow = ARGBToYJRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYJROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYJRow = ARGBToYJRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYJRow = ARGBToYJRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYJROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYJRow = ARGBToYJRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYJRow = ARGBToYJRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToUVJ422Row(src_argb, dst_u, dst_v, width);
+ ARGBToYJRow(src_argb, dst_y, width);
+ src_argb += src_stride_argb;
+ dst_y += dst_stride_y;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ return 0;
+}
+
+// Convert ARGB to J400 (JPEG full-range Y plane only).
+LIBYUV_API
+int ARGBToJ400(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_yj, int dst_stride_yj,
+ int width, int height) {
+ int y;
+ void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_yj, int pix) =
+ ARGBToYJRow_C;
+ if (!src_argb || !dst_yj || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_yj == width) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_yj = 0;
+ }
+#if defined(HAS_ARGBTOYJROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYJRow = ARGBToYJRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYJROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYJRow = ARGBToYJRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYJRow = ARGBToYJRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYJROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYJRow = ARGBToYJRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYJRow = ARGBToYJRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBToYJRow(src_argb, dst_yj, width);
+ src_argb += src_stride_argb;
+ dst_yj += dst_stride_yj;
+ }
+ return 0;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/convert_jpeg.cc b/media/libaom/src/third_party/libyuv/source/convert_jpeg.cc
new file mode 100644
index 000000000..bcb980f7f
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/convert_jpeg.cc
@@ -0,0 +1,392 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/convert.h"
+
+#ifdef HAVE_JPEG
+#include "libyuv/mjpeg_decoder.h"
+#endif
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#ifdef HAVE_JPEG
+struct I420Buffers {
+ uint8* y;
+ int y_stride;
+ uint8* u;
+ int u_stride;
+ uint8* v;
+ int v_stride;
+ int w;
+ int h;
+};
+
+static void JpegCopyI420(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ I420Buffers* dest = (I420Buffers*)(opaque);
+ I420Copy(data[0], strides[0],
+ data[1], strides[1],
+ data[2], strides[2],
+ dest->y, dest->y_stride,
+ dest->u, dest->u_stride,
+ dest->v, dest->v_stride,
+ dest->w, rows);
+ dest->y += rows * dest->y_stride;
+ dest->u += ((rows + 1) >> 1) * dest->u_stride;
+ dest->v += ((rows + 1) >> 1) * dest->v_stride;
+ dest->h -= rows;
+}
+
+static void JpegI422ToI420(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ I420Buffers* dest = (I420Buffers*)(opaque);
+ I422ToI420(data[0], strides[0],
+ data[1], strides[1],
+ data[2], strides[2],
+ dest->y, dest->y_stride,
+ dest->u, dest->u_stride,
+ dest->v, dest->v_stride,
+ dest->w, rows);
+ dest->y += rows * dest->y_stride;
+ dest->u += ((rows + 1) >> 1) * dest->u_stride;
+ dest->v += ((rows + 1) >> 1) * dest->v_stride;
+ dest->h -= rows;
+}
+
+static void JpegI444ToI420(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ I420Buffers* dest = (I420Buffers*)(opaque);
+ I444ToI420(data[0], strides[0],
+ data[1], strides[1],
+ data[2], strides[2],
+ dest->y, dest->y_stride,
+ dest->u, dest->u_stride,
+ dest->v, dest->v_stride,
+ dest->w, rows);
+ dest->y += rows * dest->y_stride;
+ dest->u += ((rows + 1) >> 1) * dest->u_stride;
+ dest->v += ((rows + 1) >> 1) * dest->v_stride;
+ dest->h -= rows;
+}
+
+static void JpegI411ToI420(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ I420Buffers* dest = (I420Buffers*)(opaque);
+ I411ToI420(data[0], strides[0],
+ data[1], strides[1],
+ data[2], strides[2],
+ dest->y, dest->y_stride,
+ dest->u, dest->u_stride,
+ dest->v, dest->v_stride,
+ dest->w, rows);
+ dest->y += rows * dest->y_stride;
+ dest->u += ((rows + 1) >> 1) * dest->u_stride;
+ dest->v += ((rows + 1) >> 1) * dest->v_stride;
+ dest->h -= rows;
+}
+
+static void JpegI400ToI420(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ I420Buffers* dest = (I420Buffers*)(opaque);
+ I400ToI420(data[0], strides[0],
+ dest->y, dest->y_stride,
+ dest->u, dest->u_stride,
+ dest->v, dest->v_stride,
+ dest->w, rows);
+ dest->y += rows * dest->y_stride;
+ dest->u += ((rows + 1) >> 1) * dest->u_stride;
+ dest->v += ((rows + 1) >> 1) * dest->v_stride;
+ dest->h -= rows;
+}
+
+// Query size of MJPG in pixels.
+LIBYUV_API
+int MJPGSize(const uint8* sample, size_t sample_size,
+ int* width, int* height) {
+ MJpegDecoder mjpeg_decoder;
+ LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size);
+ if (ret) {
+ *width = mjpeg_decoder.GetWidth();
+ *height = mjpeg_decoder.GetHeight();
+ }
+ mjpeg_decoder.UnloadFrame();
+ return ret ? 0 : -1; // -1 for runtime failure.
+}
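+
+// Example use (a sketch): probe the frame dimensions before allocating the
+// destination planes.
+//
+//   int width = 0;
+//   int height = 0;
+//   if (MJPGSize(sample, sample_size, &width, &height) == 0) {
+//     // width and height are valid; allocate I420 planes, then decode
+//     // with MJPGToI420 below.
+//   }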
+
+// MJPG (Motion JPEG) to I420.
+// TODO(fbarchard): Review the w and h requirement; dw and dh may be enough.
+LIBYUV_API
+int MJPGToI420(const uint8* sample,
+ size_t sample_size,
+ uint8* y, int y_stride,
+ uint8* u, int u_stride,
+ uint8* v, int v_stride,
+ int w, int h,
+ int dw, int dh) {
+ if (sample_size == kUnknownDataSize) {
+ // ERROR: MJPEG frame size unknown
+ return -1;
+ }
+
+ // TODO(fbarchard): Port MJpeg to C.
+ MJpegDecoder mjpeg_decoder;
+ LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size);
+ if (ret && (mjpeg_decoder.GetWidth() != w ||
+ mjpeg_decoder.GetHeight() != h)) {
+ // ERROR: MJPEG frame has unexpected dimensions
+ mjpeg_decoder.UnloadFrame();
+ return 1; // runtime failure
+ }
+ if (ret) {
+ I420Buffers bufs = { y, y_stride, u, u_stride, v, v_stride, dw, dh };
+ // YUV420
+ if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceYCbCr &&
+ mjpeg_decoder.GetNumComponents() == 3 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 2 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
+ mjpeg_decoder.GetVertSampFactor(1) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(2) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(2) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegCopyI420, &bufs, dw, dh);
+ // YUV422
+ } else if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceYCbCr &&
+ mjpeg_decoder.GetNumComponents() == 3 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
+ mjpeg_decoder.GetVertSampFactor(1) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(2) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(2) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToI420, &bufs, dw, dh);
+ // YUV444
+ } else if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceYCbCr &&
+ mjpeg_decoder.GetNumComponents() == 3 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(1) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(2) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(2) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToI420, &bufs, dw, dh);
+ // YUV411
+ } else if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceYCbCr &&
+ mjpeg_decoder.GetNumComponents() == 3 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 4 &&
+ mjpeg_decoder.GetVertSampFactor(1) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(2) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(2) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI411ToI420, &bufs, dw, dh);
+ // YUV400
+ } else if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceGrayscale &&
+ mjpeg_decoder.GetNumComponents() == 1 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToI420, &bufs, dw, dh);
+ } else {
+ // TODO(fbarchard): Implement conversion for any other colorspace/sample
+ // factors that occur in practice. 411 is supported by libjpeg
+ // ERROR: Unable to convert MJPEG frame because format is not supported
+ mjpeg_decoder.UnloadFrame();
+ return 1;
+ }
+ }
+ return ret ? 0 : 1;
+}
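+
+// The dispatch above keys off libjpeg sampling factors. With the chroma
+// components fixed at 1x1, the (horizontal, vertical) factors of the Y
+// component map to the supported subsamplings:
+//
+//   (2, 2) -> 4:2:0    (2, 1) -> 4:2:2    (1, 1) -> 4:4:4    (4, 1) -> 4:1:1
+//
+// and a single grayscale component is handled as Y-only.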
+
+#ifdef HAVE_JPEG
+struct ARGBBuffers {
+ uint8* argb;
+ int argb_stride;
+ int w;
+ int h;
+};
+
+static void JpegI420ToARGB(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ ARGBBuffers* dest = (ARGBBuffers*)(opaque);
+ I420ToARGB(data[0], strides[0],
+ data[1], strides[1],
+ data[2], strides[2],
+ dest->argb, dest->argb_stride,
+ dest->w, rows);
+ dest->argb += rows * dest->argb_stride;
+ dest->h -= rows;
+}
+
+static void JpegI422ToARGB(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ ARGBBuffers* dest = (ARGBBuffers*)(opaque);
+ I422ToARGB(data[0], strides[0],
+ data[1], strides[1],
+ data[2], strides[2],
+ dest->argb, dest->argb_stride,
+ dest->w, rows);
+ dest->argb += rows * dest->argb_stride;
+ dest->h -= rows;
+}
+
+static void JpegI444ToARGB(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ ARGBBuffers* dest = (ARGBBuffers*)(opaque);
+ I444ToARGB(data[0], strides[0],
+ data[1], strides[1],
+ data[2], strides[2],
+ dest->argb, dest->argb_stride,
+ dest->w, rows);
+ dest->argb += rows * dest->argb_stride;
+ dest->h -= rows;
+}
+
+static void JpegI411ToARGB(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ ARGBBuffers* dest = (ARGBBuffers*)(opaque);
+ I411ToARGB(data[0], strides[0],
+ data[1], strides[1],
+ data[2], strides[2],
+ dest->argb, dest->argb_stride,
+ dest->w, rows);
+ dest->argb += rows * dest->argb_stride;
+ dest->h -= rows;
+}
+
+static void JpegI400ToARGB(void* opaque,
+ const uint8* const* data,
+ const int* strides,
+ int rows) {
+ ARGBBuffers* dest = (ARGBBuffers*)(opaque);
+ I400ToARGB(data[0], strides[0],
+ dest->argb, dest->argb_stride,
+ dest->w, rows);
+ dest->argb += rows * dest->argb_stride;
+ dest->h -= rows;
+}
+
+// MJPG (Motion JPEG) to ARGB.
+// TODO(fbarchard): Review the w and h requirement; dw and dh may be enough.
+LIBYUV_API
+int MJPGToARGB(const uint8* sample,
+ size_t sample_size,
+ uint8* argb, int argb_stride,
+ int w, int h,
+ int dw, int dh) {
+ if (sample_size == kUnknownDataSize) {
+ // ERROR: MJPEG frame size unknown
+ return -1;
+ }
+
+ // TODO(fbarchard): Port MJpeg to C.
+ MJpegDecoder mjpeg_decoder;
+ LIBYUV_BOOL ret = mjpeg_decoder.LoadFrame(sample, sample_size);
+ if (ret && (mjpeg_decoder.GetWidth() != w ||
+ mjpeg_decoder.GetHeight() != h)) {
+ // ERROR: MJPEG frame has unexpected dimensions
+ mjpeg_decoder.UnloadFrame();
+ return 1; // runtime failure
+ }
+ if (ret) {
+ ARGBBuffers bufs = { argb, argb_stride, dw, dh };
+ // YUV420
+ if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceYCbCr &&
+ mjpeg_decoder.GetNumComponents() == 3 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 2 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
+ mjpeg_decoder.GetVertSampFactor(1) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(2) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(2) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI420ToARGB, &bufs, dw, dh);
+ // YUV422
+ } else if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceYCbCr &&
+ mjpeg_decoder.GetNumComponents() == 3 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 2 &&
+ mjpeg_decoder.GetVertSampFactor(1) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(2) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(2) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI422ToARGB, &bufs, dw, dh);
+ // YUV444
+ } else if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceYCbCr &&
+ mjpeg_decoder.GetNumComponents() == 3 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(1) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(2) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(2) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI444ToARGB, &bufs, dw, dh);
+ // YUV411
+ } else if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceYCbCr &&
+ mjpeg_decoder.GetNumComponents() == 3 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 4 &&
+ mjpeg_decoder.GetVertSampFactor(1) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(1) == 1 &&
+ mjpeg_decoder.GetVertSampFactor(2) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(2) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI411ToARGB, &bufs, dw, dh);
+ // YUV400
+ } else if (mjpeg_decoder.GetColorSpace() ==
+ MJpegDecoder::kColorSpaceGrayscale &&
+ mjpeg_decoder.GetNumComponents() == 1 &&
+ mjpeg_decoder.GetVertSampFactor(0) == 1 &&
+ mjpeg_decoder.GetHorizSampFactor(0) == 1) {
+ ret = mjpeg_decoder.DecodeToCallback(&JpegI400ToARGB, &bufs, dw, dh);
+ } else {
+ // TODO(fbarchard): Implement conversion for any other colorspace/sample
+ // factors that occur in practice. 411 is supported by libjpeg
+ // ERROR: Unable to convert MJPEG frame because format is not supported
+ mjpeg_decoder.UnloadFrame();
+ return 1;
+ }
+ }
+ return ret ? 0 : 1;
+}
+#endif
+
+#endif
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/convert_to_argb.cc b/media/libaom/src/third_party/libyuv/source/convert_to_argb.cc
new file mode 100644
index 000000000..af829fbd3
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/convert_to_argb.cc
@@ -0,0 +1,306 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/convert_argb.h"
+
+#include "libyuv/cpu_id.h"
+#ifdef HAVE_JPEG
+#include "libyuv/mjpeg_decoder.h"
+#endif
+#include "libyuv/rotate_argb.h"
+#include "libyuv/row.h"
+#include "libyuv/video_common.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Convert camera sample to ARGB with cropping, rotation and vertical flip.
+// src_width is used for source stride computation
+// src_height is used to compute location of planes, and indicate inversion
+// sample_size is measured in bytes and is the size of the frame.
+// With MJPEG it is the compressed size of the frame.
+LIBYUV_API
+int ConvertToARGB(const uint8* sample, size_t sample_size,
+ uint8* crop_argb, int argb_stride,
+ int crop_x, int crop_y,
+ int src_width, int src_height,
+ int crop_width, int crop_height,
+ enum RotationMode rotation,
+ uint32 fourcc) {
+ uint32 format = CanonicalFourCC(fourcc);
+ int aligned_src_width = (src_width + 1) & ~1;
+ const uint8* src;
+ const uint8* src_uv;
+ int abs_src_height = (src_height < 0) ? -src_height : src_height;
+ int inv_crop_height = (crop_height < 0) ? -crop_height : crop_height;
+ int r = 0;
+
+ // One pass rotation is available for some formats. For the rest, convert
+ // to ARGB (with optional vertical flipping) into a temporary ARGB buffer,
+ // and then rotate the ARGB to the final destination buffer.
+ // For in-place conversion, if the destination crop_argb is the same as the
+ // source sample, also use the temporary buffer.
+ LIBYUV_BOOL need_buf = (rotation && format != FOURCC_ARGB) ||
+ crop_argb == sample;
+ uint8* tmp_argb = crop_argb;
+ int tmp_argb_stride = argb_stride;
+ uint8* rotate_buffer = NULL;
+ int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height;
+
+ if (crop_argb == NULL || sample == NULL ||
+ src_width <= 0 || crop_width <= 0 ||
+ src_height == 0 || crop_height == 0) {
+ return -1;
+ }
+ if (src_height < 0) {
+ inv_crop_height = -inv_crop_height;
+ }
+
+ if (need_buf) {
+ int argb_size = crop_width * abs_crop_height * 4;
+ rotate_buffer = (uint8*)malloc(argb_size);
+ if (!rotate_buffer) {
+ return 1; // Out of memory runtime error.
+ }
+ crop_argb = rotate_buffer;
+ argb_stride = crop_width;
+ }
+
+ switch (format) {
+ // Single plane formats
+ case FOURCC_YUY2:
+ src = sample + (aligned_src_width * crop_y + crop_x) * 2;
+ r = YUY2ToARGB(src, aligned_src_width * 2,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_UYVY:
+ src = sample + (aligned_src_width * crop_y + crop_x) * 2;
+ r = UYVYToARGB(src, aligned_src_width * 2,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_24BG:
+ src = sample + (src_width * crop_y + crop_x) * 3;
+ r = RGB24ToARGB(src, src_width * 3,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_RAW:
+ src = sample + (src_width * crop_y + crop_x) * 3;
+ r = RAWToARGB(src, src_width * 3,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_ARGB:
+ src = sample + (src_width * crop_y + crop_x) * 4;
+ r = ARGBToARGB(src, src_width * 4,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_BGRA:
+ src = sample + (src_width * crop_y + crop_x) * 4;
+ r = BGRAToARGB(src, src_width * 4,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_ABGR:
+ src = sample + (src_width * crop_y + crop_x) * 4;
+ r = ABGRToARGB(src, src_width * 4,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_RGBA:
+ src = sample + (src_width * crop_y + crop_x) * 4;
+ r = RGBAToARGB(src, src_width * 4,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_RGBP:
+ src = sample + (src_width * crop_y + crop_x) * 2;
+ r = RGB565ToARGB(src, src_width * 2,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_RGBO:
+ src = sample + (src_width * crop_y + crop_x) * 2;
+ r = ARGB1555ToARGB(src, src_width * 2,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_R444:
+ src = sample + (src_width * crop_y + crop_x) * 2;
+ r = ARGB4444ToARGB(src, src_width * 2,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_I400:
+ src = sample + src_width * crop_y + crop_x;
+ r = I400ToARGB(src, src_width,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+
+ // Biplanar formats
+ case FOURCC_NV12:
+ src = sample + (src_width * crop_y + crop_x);
+ src_uv = sample + aligned_src_width * (src_height + crop_y / 2) + crop_x;
+ r = NV12ToARGB(src, src_width,
+ src_uv, aligned_src_width,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_NV21:
+ src = sample + (src_width * crop_y + crop_x);
+ src_uv = sample + aligned_src_width * (src_height + crop_y / 2) + crop_x;
+ // NV21 stores VU instead of UV; NV21ToARGB handles the swap.
+ r = NV21ToARGB(src, src_width,
+ src_uv, aligned_src_width,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_M420:
+ src = sample + (src_width * crop_y) * 12 / 8 + crop_x;
+ r = M420ToARGB(src, src_width,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ // Triplanar formats
+ case FOURCC_I420:
+ case FOURCC_YU12:
+ case FOURCC_YV12: {
+ const uint8* src_y = sample + (src_width * crop_y + crop_x);
+ const uint8* src_u;
+ const uint8* src_v;
+ int halfwidth = (src_width + 1) / 2;
+ int halfheight = (abs_src_height + 1) / 2;
+ if (format == FOURCC_YV12) {
+ src_v = sample + src_width * abs_src_height +
+ (halfwidth * crop_y + crop_x) / 2;
+ src_u = sample + src_width * abs_src_height +
+ halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
+ } else {
+ src_u = sample + src_width * abs_src_height +
+ (halfwidth * crop_y + crop_x) / 2;
+ src_v = sample + src_width * abs_src_height +
+ halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
+ }
+ r = I420ToARGB(src_y, src_width,
+ src_u, halfwidth,
+ src_v, halfwidth,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ }
+
+ case FOURCC_J420: {
+ const uint8* src_y = sample + (src_width * crop_y + crop_x);
+ const uint8* src_u;
+ const uint8* src_v;
+ int halfwidth = (src_width + 1) / 2;
+ int halfheight = (abs_src_height + 1) / 2;
+ src_u = sample + src_width * abs_src_height +
+ (halfwidth * crop_y + crop_x) / 2;
+ src_v = sample + src_width * abs_src_height +
+ halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
+ r = J420ToARGB(src_y, src_width,
+ src_u, halfwidth,
+ src_v, halfwidth,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ }
+
+ case FOURCC_I422:
+ case FOURCC_YV16: {
+ const uint8* src_y = sample + src_width * crop_y + crop_x;
+ const uint8* src_u;
+ const uint8* src_v;
+ int halfwidth = (src_width + 1) / 2;
+ if (format == FOURCC_YV16) {
+ src_v = sample + src_width * abs_src_height +
+ halfwidth * crop_y + crop_x / 2;
+ src_u = sample + src_width * abs_src_height +
+ halfwidth * (abs_src_height + crop_y) + crop_x / 2;
+ } else {
+ src_u = sample + src_width * abs_src_height +
+ halfwidth * crop_y + crop_x / 2;
+ src_v = sample + src_width * abs_src_height +
+ halfwidth * (abs_src_height + crop_y) + crop_x / 2;
+ }
+ r = I422ToARGB(src_y, src_width,
+ src_u, halfwidth,
+ src_v, halfwidth,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ }
+ case FOURCC_I444:
+ case FOURCC_YV24: {
+ const uint8* src_y = sample + src_width * crop_y + crop_x;
+ const uint8* src_u;
+ const uint8* src_v;
+ if (format == FOURCC_YV24) {
+ src_v = sample + src_width * (abs_src_height + crop_y) + crop_x;
+ src_u = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
+ } else {
+ src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
+ src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
+ }
+ r = I444ToARGB(src_y, src_width,
+ src_u, src_width,
+ src_v, src_width,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ }
+ case FOURCC_I411: {
+ int quarterwidth = (src_width + 3) / 4;
+ const uint8* src_y = sample + src_width * crop_y + crop_x;
+ const uint8* src_u = sample + src_width * abs_src_height +
+ quarterwidth * crop_y + crop_x / 4;
+ const uint8* src_v = sample + src_width * abs_src_height +
+ quarterwidth * (abs_src_height + crop_y) + crop_x / 4;
+ r = I411ToARGB(src_y, src_width,
+ src_u, quarterwidth,
+ src_v, quarterwidth,
+ crop_argb, argb_stride,
+ crop_width, inv_crop_height);
+ break;
+ }
+#ifdef HAVE_JPEG
+ case FOURCC_MJPG:
+ r = MJPGToARGB(sample, sample_size,
+ crop_argb, argb_stride,
+ src_width, abs_src_height, crop_width, inv_crop_height);
+ break;
+#endif
+ default:
+ r = -1; // unknown fourcc - return failure code.
+ }
+
+ if (need_buf) {
+ if (!r) {
+ r = ARGBRotate(crop_argb, argb_stride,
+ tmp_argb, tmp_argb_stride,
+ crop_width, abs_crop_height, rotation);
+ }
+ free(rotate_buffer);
+ }
+
+ return r;
+}
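+
+// Example use (a sketch): convert a packed YUY2 camera frame to ARGB with no
+// cropping or rotation. kRotate0 and FOURCC_YUY2 come from the libyuv
+// rotation and video_common headers included above.
+//
+//   ConvertToARGB(frame, frame_size,
+//                 argb, width * 4,
+//                 0, 0,           // crop_x, crop_y
+//                 width, height,  // source dimensions
+//                 width, height,  // crop dimensions
+//                 kRotate0, FOURCC_YUY2);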
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/convert_to_i420.cc b/media/libaom/src/third_party/libyuv/source/convert_to_i420.cc
new file mode 100644
index 000000000..5e75369b5
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/convert_to_i420.cc
@@ -0,0 +1,339 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include <stdlib.h>
+
+#include "libyuv/convert.h"
+
+#include "libyuv/video_common.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Convert camera sample to I420 with cropping, rotation and vertical flip.
+// src_width is used for source stride computation
+// src_height is used to compute location of planes, and indicate inversion
+// sample_size is measured in bytes and is the size of the frame.
+// With MJPEG it is the compressed size of the frame.
+LIBYUV_API
+int ConvertToI420(const uint8* sample,
+ size_t sample_size,
+ uint8* y, int y_stride,
+ uint8* u, int u_stride,
+ uint8* v, int v_stride,
+ int crop_x, int crop_y,
+ int src_width, int src_height,
+ int crop_width, int crop_height,
+ enum RotationMode rotation,
+ uint32 fourcc) {
+ uint32 format = CanonicalFourCC(fourcc);
+ int aligned_src_width = (src_width + 1) & ~1;
+ const uint8* src;
+ const uint8* src_uv;
+ int abs_src_height = (src_height < 0) ? -src_height : src_height;
+ int inv_crop_height = (crop_height < 0) ? -crop_height : crop_height;
+ int r = 0;
+ LIBYUV_BOOL need_buf = (rotation && format != FOURCC_I420 &&
+ format != FOURCC_NV12 && format != FOURCC_NV21 &&
+ format != FOURCC_YU12 && format != FOURCC_YV12) || y == sample;
+ uint8* tmp_y = y;
+ uint8* tmp_u = u;
+ uint8* tmp_v = v;
+ int tmp_y_stride = y_stride;
+ int tmp_u_stride = u_stride;
+ int tmp_v_stride = v_stride;
+ uint8* rotate_buffer = NULL;
+ int abs_crop_height = (crop_height < 0) ? -crop_height : crop_height;
+
+ if (!y || !u || !v || !sample ||
+ src_width <= 0 || crop_width <= 0 ||
+ src_height == 0 || crop_height == 0) {
+ return -1;
+ }
+ if (src_height < 0) {
+ inv_crop_height = -inv_crop_height;
+ }
+
+ // One pass rotation is available for some formats. For the rest, convert
+ // to I420 (with optional vertical flipping) into a temporary I420 buffer,
+ // and then rotate the I420 to the final destination buffer.
+ // For in-place conversion, if destination y is same as source sample,
+ // also enable temporary buffer.
+ if (need_buf) {
+ int y_size = crop_width * abs_crop_height;
+ int uv_size = ((crop_width + 1) / 2) * ((abs_crop_height + 1) / 2);
+ rotate_buffer = (uint8*)malloc(y_size + uv_size * 2);
+ if (!rotate_buffer) {
+ return 1; // Out of memory runtime error.
+ }
+ y = rotate_buffer;
+ u = y + y_size;
+ v = u + uv_size;
+ y_stride = crop_width;
+ u_stride = v_stride = ((crop_width + 1) / 2);
+ }
+
+ switch (format) {
+ // Single plane formats
+ case FOURCC_YUY2:
+ src = sample + (aligned_src_width * crop_y + crop_x) * 2;
+ r = YUY2ToI420(src, aligned_src_width * 2,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_UYVY:
+ src = sample + (aligned_src_width * crop_y + crop_x) * 2;
+ r = UYVYToI420(src, aligned_src_width * 2,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_RGBP:
+ src = sample + (src_width * crop_y + crop_x) * 2;
+ r = RGB565ToI420(src, src_width * 2,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_RGBO:
+ src = sample + (src_width * crop_y + crop_x) * 2;
+ r = ARGB1555ToI420(src, src_width * 2,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_R444:
+ src = sample + (src_width * crop_y + crop_x) * 2;
+ r = ARGB4444ToI420(src, src_width * 2,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_24BG:
+ src = sample + (src_width * crop_y + crop_x) * 3;
+ r = RGB24ToI420(src, src_width * 3,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_RAW:
+ src = sample + (src_width * crop_y + crop_x) * 3;
+ r = RAWToI420(src, src_width * 3,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_ARGB:
+ src = sample + (src_width * crop_y + crop_x) * 4;
+ r = ARGBToI420(src, src_width * 4,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_BGRA:
+ src = sample + (src_width * crop_y + crop_x) * 4;
+ r = BGRAToI420(src, src_width * 4,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_ABGR:
+ src = sample + (src_width * crop_y + crop_x) * 4;
+ r = ABGRToI420(src, src_width * 4,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_RGBA:
+ src = sample + (src_width * crop_y + crop_x) * 4;
+ r = RGBAToI420(src, src_width * 4,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ case FOURCC_I400:
+ src = sample + src_width * crop_y + crop_x;
+ r = I400ToI420(src, src_width,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ // Biplanar formats
+ case FOURCC_NV12:
+ src = sample + (src_width * crop_y + crop_x);
+ src_uv = sample + (src_width * src_height) +
+ ((crop_y / 2) * aligned_src_width) + ((crop_x / 2) * 2);
+ r = NV12ToI420Rotate(src, src_width,
+ src_uv, aligned_src_width,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height, rotation);
+ break;
+ case FOURCC_NV21:
+ src = sample + (src_width * crop_y + crop_x);
+ src_uv = sample + (src_width * src_height) +
+ ((crop_y / 2) * aligned_src_width) + ((crop_x / 2) * 2);
+ // Call NV12 but with u and v parameters swapped.
+ r = NV12ToI420Rotate(src, src_width,
+ src_uv, aligned_src_width,
+ y, y_stride,
+ v, v_stride,
+ u, u_stride,
+ crop_width, inv_crop_height, rotation);
+ break;
+ case FOURCC_M420:
+ src = sample + (src_width * crop_y) * 12 / 8 + crop_x;
+ r = M420ToI420(src, src_width,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ // Triplanar formats
+ case FOURCC_I420:
+ case FOURCC_YU12:
+ case FOURCC_YV12: {
+ const uint8* src_y = sample + (src_width * crop_y + crop_x);
+ const uint8* src_u;
+ const uint8* src_v;
+ int halfwidth = (src_width + 1) / 2;
+ int halfheight = (abs_src_height + 1) / 2;
+ if (format == FOURCC_YV12) {
+ src_v = sample + src_width * abs_src_height +
+ (halfwidth * crop_y + crop_x) / 2;
+ src_u = sample + src_width * abs_src_height +
+ halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
+ } else {
+ src_u = sample + src_width * abs_src_height +
+ (halfwidth * crop_y + crop_x) / 2;
+ src_v = sample + src_width * abs_src_height +
+ halfwidth * (halfheight + crop_y / 2) + crop_x / 2;
+ }
+ r = I420Rotate(src_y, src_width,
+ src_u, halfwidth,
+ src_v, halfwidth,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height, rotation);
+ break;
+ }
+ case FOURCC_I422:
+ case FOURCC_YV16: {
+ const uint8* src_y = sample + src_width * crop_y + crop_x;
+ const uint8* src_u;
+ const uint8* src_v;
+ int halfwidth = (src_width + 1) / 2;
+ if (format == FOURCC_YV16) {
+ src_v = sample + src_width * abs_src_height +
+ halfwidth * crop_y + crop_x / 2;
+ src_u = sample + src_width * abs_src_height +
+ halfwidth * (abs_src_height + crop_y) + crop_x / 2;
+ } else {
+ src_u = sample + src_width * abs_src_height +
+ halfwidth * crop_y + crop_x / 2;
+ src_v = sample + src_width * abs_src_height +
+ halfwidth * (abs_src_height + crop_y) + crop_x / 2;
+ }
+ r = I422ToI420(src_y, src_width,
+ src_u, halfwidth,
+ src_v, halfwidth,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ }
+ case FOURCC_I444:
+ case FOURCC_YV24: {
+ const uint8* src_y = sample + src_width * crop_y + crop_x;
+ const uint8* src_u;
+ const uint8* src_v;
+ if (format == FOURCC_YV24) {
+ src_v = sample + src_width * (abs_src_height + crop_y) + crop_x;
+ src_u = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
+ } else {
+ src_u = sample + src_width * (abs_src_height + crop_y) + crop_x;
+ src_v = sample + src_width * (abs_src_height * 2 + crop_y) + crop_x;
+ }
+ r = I444ToI420(src_y, src_width,
+ src_u, src_width,
+ src_v, src_width,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ }
+ case FOURCC_I411: {
+ int quarterwidth = (src_width + 3) / 4;
+ const uint8* src_y = sample + src_width * crop_y + crop_x;
+ const uint8* src_u = sample + src_width * abs_src_height +
+ quarterwidth * crop_y + crop_x / 4;
+ const uint8* src_v = sample + src_width * abs_src_height +
+ quarterwidth * (abs_src_height + crop_y) + crop_x / 4;
+ r = I411ToI420(src_y, src_width,
+ src_u, quarterwidth,
+ src_v, quarterwidth,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ crop_width, inv_crop_height);
+ break;
+ }
+#ifdef HAVE_JPEG
+ case FOURCC_MJPG:
+ r = MJPGToI420(sample, sample_size,
+ y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ src_width, abs_src_height, crop_width, inv_crop_height);
+ break;
+#endif
+ default:
+ r = -1; // unknown fourcc - return failure code.
+ }
+
+ if (need_buf) {
+ if (!r) {
+ r = I420Rotate(y, y_stride,
+ u, u_stride,
+ v, v_stride,
+ tmp_y, tmp_y_stride,
+ tmp_u, tmp_u_stride,
+ tmp_v, tmp_v_stride,
+ crop_width, abs_crop_height, rotation);
+ }
+ free(rotate_buffer);
+ }
+
+ return r;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/cpu_id.cc b/media/libaom/src/third_party/libyuv/source/cpu_id.cc
new file mode 100644
index 000000000..72f686e3b
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/cpu_id.cc
@@ -0,0 +1,307 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/cpu_id.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h> // For __cpuidex()
+#endif
+#if !defined(__pnacl__) && !defined(__CLR_VER) && \
+ !defined(__native_client__) && (defined(_M_IX86) || defined(_M_X64)) && \
+ defined(_MSC_VER) && !defined(__clang__) && (_MSC_FULL_VER >= 160040219)
+#include <immintrin.h> // For _xgetbv()
+#endif
+
+#if !defined(__native_client__)
+#include <stdlib.h> // For getenv()
+#endif
+
+// For ArmCpuCaps(), but unit tested on all platforms.
+#include <stdio.h>
+#include <string.h>
+
+#include "libyuv/basic_types.h" // For CPU_X86
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// For functions that use the stack and have runtime checks for overflow,
+// use SAFEBUFFERS to avoid the additional check.
+#if (defined(_MSC_VER) && !defined(__clang__)) && (_MSC_FULL_VER >= 160040219)
+#define SAFEBUFFERS __declspec(safebuffers)
+#else
+#define SAFEBUFFERS
+#endif
+
+// Low level cpuid for X86.
+#if (defined(_M_IX86) || defined(_M_X64) || \
+ defined(__i386__) || defined(__x86_64__)) && \
+ !defined(__pnacl__) && !defined(__CLR_VER)
+LIBYUV_API
+void CpuId(uint32 info_eax, uint32 info_ecx, uint32* cpu_info) {
+#if defined(_MSC_VER) && !defined(__clang__)
+// Visual C version uses intrinsic or inline x86 assembly.
+#if (_MSC_FULL_VER >= 160040219)
+ __cpuidex((int*)(cpu_info), info_eax, info_ecx);
+#elif defined(_M_IX86)
+ __asm {
+ mov eax, info_eax
+ mov ecx, info_ecx
+ mov edi, cpu_info
+ cpuid
+ mov [edi], eax
+ mov [edi + 4], ebx
+ mov [edi + 8], ecx
+ mov [edi + 12], edx
+ }
+#else
+ if (info_ecx == 0) {
+ __cpuid((int*)(cpu_info), info_eax);
+ } else {
+ cpu_info[3] = cpu_info[2] = cpu_info[1] = cpu_info[0] = 0;
+ }
+#endif
+// GCC version uses inline x86 assembly.
+#else // defined(_MSC_VER) && !defined(__clang__)
+ uint32 info_ebx, info_edx;
+ asm volatile ( // NOLINT
+#if defined( __i386__) && defined(__PIC__)
+ // Preserve ebx for fpic 32 bit.
+ "mov %%ebx, %%edi \n"
+ "cpuid \n"
+ "xchg %%edi, %%ebx \n"
+ : "=D" (info_ebx),
+#else
+ "cpuid \n"
+ : "=b" (info_ebx),
+#endif // defined( __i386__) && defined(__PIC__)
+ "+a" (info_eax), "+c" (info_ecx), "=d" (info_edx));
+ cpu_info[0] = info_eax;
+ cpu_info[1] = info_ebx;
+ cpu_info[2] = info_ecx;
+ cpu_info[3] = info_edx;
+#endif // defined(_MSC_VER) && !defined(__clang__)
+}
+#else // (defined(_M_IX86) || defined(_M_X64) ...
+LIBYUV_API
+void CpuId(uint32 eax, uint32 ecx, uint32* cpu_info) {
+ cpu_info[0] = cpu_info[1] = cpu_info[2] = cpu_info[3] = 0;
+}
+#endif
+
+// TODO(fbarchard): Enable xgetbv when validator supports it.
+#if (defined(_M_IX86) || defined(_M_X64) || \
+ defined(__i386__) || defined(__x86_64__)) && \
+ !defined(__pnacl__) && !defined(__CLR_VER) && !defined(__native_client__)
+#define HAS_XGETBV
+// X86 CPUs have xgetbv to detect whether the OS saves the high parts of the ymm registers.
+int TestOsSaveYmm() {
+ uint32 xcr0 = 0u;
+#if (defined(_MSC_VER) && !defined(__clang__)) && (_MSC_FULL_VER >= 160040219)
+ xcr0 = (uint32)(_xgetbv(0)); // VS2010 SP1 required.
+#elif defined(_M_IX86) && defined(_MSC_VER) && !defined(__clang__)
+ __asm {
+ xor ecx, ecx // xcr 0
+ _asm _emit 0x0f _asm _emit 0x01 _asm _emit 0xd0 // For VS2010 and earlier.
+ mov xcr0, eax
+ }
+#elif defined(__i386__) || defined(__x86_64__)
+ asm(".byte 0x0f, 0x01, 0xd0" : "=a" (xcr0) : "c" (0) : "%edx");
+#endif // defined(__i386__) || defined(__x86_64__)
+ return((xcr0 & 6) == 6); // XCR0 bits 1 (SSE) and 2 (AVX) must both be set.
+}
+#endif // defined(_M_IX86) || defined(_M_X64) ..
+
+// Based on libaom arm_cpudetect.c.
+// For Arm, but public to allow testing on any CPU
+LIBYUV_API SAFEBUFFERS
+int ArmCpuCaps(const char* cpuinfo_name) {
+ char cpuinfo_line[512];
+ FILE* f = fopen(cpuinfo_name, "r");
+ if (!f) {
+ // Assume Neon if /proc/cpuinfo is unavailable.
+ // This occurs in the Chrome sandbox for the Pepper or Render process.
+ return kCpuHasNEON;
+ }
+ while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f)) {
+ if (memcmp(cpuinfo_line, "Features", 8) == 0) {
+ char* p = strstr(cpuinfo_line, " neon");
+ if (p && (p[5] == ' ' || p[5] == '\n')) {
+ fclose(f);
+ return kCpuHasNEON;
+ }
+ // aarch64 uses asimd for Neon.
+ p = strstr(cpuinfo_line, " asimd");
+ if (p && (p[6] == ' ' || p[6] == '\n')) {
+ fclose(f);
+ return kCpuHasNEON;
+ }
+ }
+ }
+ fclose(f);
+ return 0;
+}
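+// Illustrative /proc/cpuinfo Features lines matched above:
+//   Features : swp half thumb fastmult vfp edsp neon vfpv3   (32-bit Arm)
+//   Features : fp asimd evtstrm crc32                        (aarch64)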
+
+#if defined(__mips__) && defined(__linux__)
+static int MipsCpuCaps(const char* search_string) {
+ char cpuinfo_line[512];
+ const char* file_name = "/proc/cpuinfo";
+ FILE* f = fopen(file_name, "r");
+ if (!f) {
+ // Assume DSP if /proc/cpuinfo is unavailable.
+    // This occurs in the Chrome sandbox for the Pepper and Render processes.
+ return kCpuHasMIPS_DSP;
+ }
+ while (fgets(cpuinfo_line, sizeof(cpuinfo_line) - 1, f) != NULL) {
+ if (strstr(cpuinfo_line, search_string) != NULL) {
+ fclose(f);
+ return kCpuHasMIPS_DSP;
+ }
+ }
+ fclose(f);
+ return 0;
+}
+#endif
+
+// Detected CPU feature flags for SIMD instruction sets.
+LIBYUV_API
+int cpu_info_ = kCpuInit; // cpu_info is not initialized yet.
+
+// Tests an environment variable used to disable a CPU feature. Any non-zero
+// value disables the feature; zero is ignored so it is easy to toggle.
+#if !defined(__native_client__) && !defined(_M_ARM)
+
+static LIBYUV_BOOL TestEnv(const char* name) {
+ const char* var = getenv(name);
+ if (var) {
+ if (var[0] != '0') {
+ return LIBYUV_TRUE;
+ }
+ }
+ return LIBYUV_FALSE;
+}
+#else  // NaCl does not support getenv().
+static LIBYUV_BOOL TestEnv(const char*) {
+ return LIBYUV_FALSE;
+}
+#endif
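+// Illustrative: running with LIBYUV_DISABLE_AVX2=1 in the environment makes
+// InitCpuFlags() below clear kCpuHasAVX2, forcing the non-AVX2 code paths.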
+
+LIBYUV_API SAFEBUFFERS
+int InitCpuFlags(void) {
+#if !defined(__pnacl__) && !defined(__CLR_VER) && defined(CPU_X86)
+
+ uint32 cpu_info0[4] = { 0, 0, 0, 0 };
+ uint32 cpu_info1[4] = { 0, 0, 0, 0 };
+ uint32 cpu_info7[4] = { 0, 0, 0, 0 };
+ CpuId(0, 0, cpu_info0);
+ CpuId(1, 0, cpu_info1);
+ if (cpu_info0[0] >= 7) {
+ CpuId(7, 0, cpu_info7);
+ }
+ cpu_info_ = ((cpu_info1[3] & 0x04000000) ? kCpuHasSSE2 : 0) |
+ ((cpu_info1[2] & 0x00000200) ? kCpuHasSSSE3 : 0) |
+ ((cpu_info1[2] & 0x00080000) ? kCpuHasSSE41 : 0) |
+ ((cpu_info1[2] & 0x00100000) ? kCpuHasSSE42 : 0) |
+ ((cpu_info7[1] & 0x00000200) ? kCpuHasERMS : 0) |
+ ((cpu_info1[2] & 0x00001000) ? kCpuHasFMA3 : 0) |
+ kCpuHasX86;
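+  // Illustrative mapping of the masks above (CPUID leaf 1 edx/ecx, leaf 7
+  // ebx): edx bit 26 = SSE2; ecx bit 9 = SSSE3, bit 12 = FMA3, bit 19 =
+  // SSE4.1, bit 20 = SSE4.2; leaf 7 ebx bit 9 = ERMS (enhanced rep movsb).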
+
+#ifdef HAS_XGETBV
+ if ((cpu_info1[2] & 0x18000000) == 0x18000000 && // AVX and OSSave
+ TestOsSaveYmm()) { // Saves YMM.
+ cpu_info_ |= ((cpu_info7[1] & 0x00000020) ? kCpuHasAVX2 : 0) |
+ kCpuHasAVX;
+ }
+#endif
+ // Environment variable overrides for testing.
+ if (TestEnv("LIBYUV_DISABLE_X86")) {
+ cpu_info_ &= ~kCpuHasX86;
+ }
+ if (TestEnv("LIBYUV_DISABLE_SSE2")) {
+ cpu_info_ &= ~kCpuHasSSE2;
+ }
+ if (TestEnv("LIBYUV_DISABLE_SSSE3")) {
+ cpu_info_ &= ~kCpuHasSSSE3;
+ }
+ if (TestEnv("LIBYUV_DISABLE_SSE41")) {
+ cpu_info_ &= ~kCpuHasSSE41;
+ }
+ if (TestEnv("LIBYUV_DISABLE_SSE42")) {
+ cpu_info_ &= ~kCpuHasSSE42;
+ }
+ if (TestEnv("LIBYUV_DISABLE_AVX")) {
+ cpu_info_ &= ~kCpuHasAVX;
+ }
+ if (TestEnv("LIBYUV_DISABLE_AVX2")) {
+ cpu_info_ &= ~kCpuHasAVX2;
+ }
+ if (TestEnv("LIBYUV_DISABLE_ERMS")) {
+ cpu_info_ &= ~kCpuHasERMS;
+ }
+ if (TestEnv("LIBYUV_DISABLE_FMA3")) {
+ cpu_info_ &= ~kCpuHasFMA3;
+ }
+#endif
+#if defined(__mips__) && defined(__linux__)
+  // On Linux MIPS, parse the /proc/cpuinfo text file to detect DSP support.
+  cpu_info_ = MipsCpuCaps("dsp");  // Sets kCpuHasMIPS_DSP if found.
+#if defined(__mips_dspr2)
+ cpu_info_ |= kCpuHasMIPS_DSPR2;
+#endif
+ cpu_info_ |= kCpuHasMIPS;
+
+ if (getenv("LIBYUV_DISABLE_MIPS")) {
+ cpu_info_ &= ~kCpuHasMIPS;
+ }
+ if (getenv("LIBYUV_DISABLE_MIPS_DSP")) {
+ cpu_info_ &= ~kCpuHasMIPS_DSP;
+ }
+ if (getenv("LIBYUV_DISABLE_MIPS_DSPR2")) {
+ cpu_info_ &= ~kCpuHasMIPS_DSPR2;
+ }
+#endif
+#if defined(__arm__) || defined(__aarch64__)
+// gcc -mfpu=neon defines __ARM_NEON__.
+// __ARM_NEON__ generates code that requires Neon, and NaCl also requires Neon.
+// On Linux, /proc/cpuinfo can be tested; without it, assume Neon is present.
+#if defined(__ARM_NEON__) || defined(__native_client__) || !defined(__linux__)
+  cpu_info_ = kCpuHasNEON;
+// On aarch64 (arm64) the /proc/cpuinfo feature list is incomplete, e.g. it
+// has no neon flag, so Neon support is hard coded on for aarch64.
+#endif
+#if defined(__aarch64__)
+ cpu_info_ = kCpuHasNEON;
+#else
+  // On Linux Arm, parse the /proc/cpuinfo text file to detect Neon support.
+ cpu_info_ = ArmCpuCaps("/proc/cpuinfo");
+#endif
+ cpu_info_ |= kCpuHasARM;
+ if (TestEnv("LIBYUV_DISABLE_NEON")) {
+ cpu_info_ &= ~kCpuHasNEON;
+ }
+#endif  // __arm__ || __aarch64__
+ if (TestEnv("LIBYUV_DISABLE_ASM")) {
+ cpu_info_ = 0;
+ }
+ return cpu_info_;
+}
+
+LIBYUV_API
+void MaskCpuFlags(int enable_flags) {
+ cpu_info_ = InitCpuFlags() & enable_flags;
+}
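+// Illustrative: MaskCpuFlags(0) clears every feature bit so only C code paths
+// run; MaskCpuFlags(-1) keeps everything that InitCpuFlags() detected.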
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/mjpeg_decoder.cc b/media/libaom/src/third_party/libyuv/source/mjpeg_decoder.cc
new file mode 100644
index 000000000..75f8a610e
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/mjpeg_decoder.cc
@@ -0,0 +1,572 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/mjpeg_decoder.h"
+
+#ifdef HAVE_JPEG
+#include <assert.h>
+
+#if !defined(__pnacl__) && !defined(__CLR_VER) && \
+ !defined(COVERAGE_ENABLED) && !defined(TARGET_IPHONE_SIMULATOR)
+// Must be included before jpeglib.
+#include <setjmp.h>
+#define HAVE_SETJMP
+
+#if defined(_MSC_VER)
+// disable warning 4324: structure was padded due to __declspec(align())
+#pragma warning(disable:4324)
+#endif
+
+#endif
+struct FILE; // For jpeglib.h.
+
+// C++ build requires extern C for jpeg internals.
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <jpeglib.h>
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#include "libyuv/planar_functions.h" // For CopyPlane().
+
+namespace libyuv {
+
+#ifdef HAVE_SETJMP
+struct SetJmpErrorMgr {
+  jpeg_error_mgr base;  // Must be first: jpeg_error_mgr* is cast to SetJmpErrorMgr*.
+ jmp_buf setjmp_buffer;
+};
+#endif
+
+const int MJpegDecoder::kColorSpaceUnknown = JCS_UNKNOWN;
+const int MJpegDecoder::kColorSpaceGrayscale = JCS_GRAYSCALE;
+const int MJpegDecoder::kColorSpaceRgb = JCS_RGB;
+const int MJpegDecoder::kColorSpaceYCbCr = JCS_YCbCr;
+const int MJpegDecoder::kColorSpaceCMYK = JCS_CMYK;
+const int MJpegDecoder::kColorSpaceYCCK = JCS_YCCK;
+
+// Methods that are passed to jpeglib.
+boolean fill_input_buffer(jpeg_decompress_struct* cinfo);
+void init_source(jpeg_decompress_struct* cinfo);
+void skip_input_data(jpeg_decompress_struct* cinfo,
+ long num_bytes); // NOLINT
+void term_source(jpeg_decompress_struct* cinfo);
+void ErrorHandler(jpeg_common_struct* cinfo);
+
+MJpegDecoder::MJpegDecoder()
+ : has_scanline_padding_(LIBYUV_FALSE),
+ num_outbufs_(0),
+ scanlines_(NULL),
+ scanlines_sizes_(NULL),
+ databuf_(NULL),
+ databuf_strides_(NULL) {
+ decompress_struct_ = new jpeg_decompress_struct;
+ source_mgr_ = new jpeg_source_mgr;
+#ifdef HAVE_SETJMP
+ error_mgr_ = new SetJmpErrorMgr;
+ decompress_struct_->err = jpeg_std_error(&error_mgr_->base);
+ // Override standard exit()-based error handler.
+ error_mgr_->base.error_exit = &ErrorHandler;
+#endif
+ decompress_struct_->client_data = NULL;
+ source_mgr_->init_source = &init_source;
+ source_mgr_->fill_input_buffer = &fill_input_buffer;
+ source_mgr_->skip_input_data = &skip_input_data;
+ source_mgr_->resync_to_restart = &jpeg_resync_to_restart;
+ source_mgr_->term_source = &term_source;
+ jpeg_create_decompress(decompress_struct_);
+ decompress_struct_->src = source_mgr_;
+ buf_vec_.buffers = &buf_;
+ buf_vec_.len = 1;
+}
+
+MJpegDecoder::~MJpegDecoder() {
+ jpeg_destroy_decompress(decompress_struct_);
+ delete decompress_struct_;
+ delete source_mgr_;
+#ifdef HAVE_SETJMP
+ delete error_mgr_;
+#endif
+ DestroyOutputBuffers();
+}
+
+LIBYUV_BOOL MJpegDecoder::LoadFrame(const uint8* src, size_t src_len) {
+ if (!ValidateJpeg(src, src_len)) {
+ return LIBYUV_FALSE;
+ }
+
+ buf_.data = src;
+ buf_.len = static_cast<int>(src_len);
+ buf_vec_.pos = 0;
+ decompress_struct_->client_data = &buf_vec_;
+#ifdef HAVE_SETJMP
+ if (setjmp(error_mgr_->setjmp_buffer)) {
+ // We called jpeg_read_header, it experienced an error, and we called
+ // longjmp() and rewound the stack to here. Return error.
+ return LIBYUV_FALSE;
+ }
+#endif
+ if (jpeg_read_header(decompress_struct_, TRUE) != JPEG_HEADER_OK) {
+ // ERROR: Bad MJPEG header
+ return LIBYUV_FALSE;
+ }
+ AllocOutputBuffers(GetNumComponents());
+ for (int i = 0; i < num_outbufs_; ++i) {
+ int scanlines_size = GetComponentScanlinesPerImcuRow(i);
+ if (scanlines_sizes_[i] != scanlines_size) {
+ if (scanlines_[i]) {
+        delete [] scanlines_[i];  // Allocated with new[], so use delete[].
+ }
+ scanlines_[i] = new uint8* [scanlines_size];
+ scanlines_sizes_[i] = scanlines_size;
+ }
+
+ // We allocate padding for the final scanline to pad it up to DCTSIZE bytes
+  // to avoid memory errors, since jpeglib only reads full MCU blocks. For
+ // the preceding scanlines, the padding is not needed/wanted because the
+ // following addresses will already be valid (they are the initial bytes of
+ // the next scanline) and will be overwritten when jpeglib writes out that
+ // next scanline.
+ int databuf_stride = GetComponentStride(i);
+ int databuf_size = scanlines_size * databuf_stride;
+ if (databuf_strides_[i] != databuf_stride) {
+ if (databuf_[i]) {
+        delete [] databuf_[i];  // Allocated with new[], so use delete[].
+ }
+ databuf_[i] = new uint8[databuf_size];
+ databuf_strides_[i] = databuf_stride;
+ }
+
+ if (GetComponentStride(i) != GetComponentWidth(i)) {
+ has_scanline_padding_ = LIBYUV_TRUE;
+ }
+ }
+ return LIBYUV_TRUE;
+}
+
+static int DivideAndRoundUp(int numerator, int denominator) {
+ return (numerator + denominator - 1) / denominator;
+}
+
+static int DivideAndRoundDown(int numerator, int denominator) {
+ return numerator / denominator;
+}
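+// Illustrative: DivideAndRoundUp(5, 2) == 3 and DivideAndRoundDown(5, 2) == 2;
+// these size subsampled chroma planes and split skip counts below.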
+
+// Returns width of the last loaded frame.
+int MJpegDecoder::GetWidth() {
+ return decompress_struct_->image_width;
+}
+
+// Returns height of the last loaded frame.
+int MJpegDecoder::GetHeight() {
+ return decompress_struct_->image_height;
+}
+
+// Returns format of the last loaded frame. The return value is one of the
+// kColorSpace* constants.
+int MJpegDecoder::GetColorSpace() {
+ return decompress_struct_->jpeg_color_space;
+}
+
+// Number of color components in the color space.
+int MJpegDecoder::GetNumComponents() {
+ return decompress_struct_->num_components;
+}
+
+// Sample factors of the n-th component.
+int MJpegDecoder::GetHorizSampFactor(int component) {
+ return decompress_struct_->comp_info[component].h_samp_factor;
+}
+
+int MJpegDecoder::GetVertSampFactor(int component) {
+ return decompress_struct_->comp_info[component].v_samp_factor;
+}
+
+int MJpegDecoder::GetHorizSubSampFactor(int component) {
+ return decompress_struct_->max_h_samp_factor /
+ GetHorizSampFactor(component);
+}
+
+int MJpegDecoder::GetVertSubSampFactor(int component) {
+ return decompress_struct_->max_v_samp_factor /
+ GetVertSampFactor(component);
+}
+
+int MJpegDecoder::GetImageScanlinesPerImcuRow() {
+ return decompress_struct_->max_v_samp_factor * DCTSIZE;
+}
+
+int MJpegDecoder::GetComponentScanlinesPerImcuRow(int component) {
+ int vs = GetVertSubSampFactor(component);
+ return DivideAndRoundUp(GetImageScanlinesPerImcuRow(), vs);
+}
+
+int MJpegDecoder::GetComponentWidth(int component) {
+ int hs = GetHorizSubSampFactor(component);
+ return DivideAndRoundUp(GetWidth(), hs);
+}
+
+int MJpegDecoder::GetComponentHeight(int component) {
+ int vs = GetVertSubSampFactor(component);
+ return DivideAndRoundUp(GetHeight(), vs);
+}
+
+// Get width in bytes padded out to a multiple of DCTSIZE
+int MJpegDecoder::GetComponentStride(int component) {
+ return (GetComponentWidth(component) + DCTSIZE - 1) & ~(DCTSIZE - 1);
+}
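+// Illustrative: with DCTSIZE == 8, a component 35 bytes wide is padded to a
+// stride of (35 + 7) & ~7 == 40 bytes.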
+
+int MJpegDecoder::GetComponentSize(int component) {
+ return GetComponentWidth(component) * GetComponentHeight(component);
+}
+
+LIBYUV_BOOL MJpegDecoder::UnloadFrame() {
+#ifdef HAVE_SETJMP
+ if (setjmp(error_mgr_->setjmp_buffer)) {
+ // We called jpeg_abort_decompress, it experienced an error, and we called
+ // longjmp() and rewound the stack to here. Return error.
+ return LIBYUV_FALSE;
+ }
+#endif
+ jpeg_abort_decompress(decompress_struct_);
+ return LIBYUV_TRUE;
+}
+
+// TODO(fbarchard): Allow rectangle to be specified: x, y, width, height.
+LIBYUV_BOOL MJpegDecoder::DecodeToBuffers(
+ uint8** planes, int dst_width, int dst_height) {
+ if (dst_width != GetWidth() ||
+ dst_height > GetHeight()) {
+ // ERROR: Bad dimensions
+ return LIBYUV_FALSE;
+ }
+#ifdef HAVE_SETJMP
+ if (setjmp(error_mgr_->setjmp_buffer)) {
+ // We called into jpeglib, it experienced an error sometime during this
+ // function call, and we called longjmp() and rewound the stack to here.
+ // Return error.
+ return LIBYUV_FALSE;
+ }
+#endif
+ if (!StartDecode()) {
+ return LIBYUV_FALSE;
+ }
+ SetScanlinePointers(databuf_);
+ int lines_left = dst_height;
+  // Compute the number of lines to skip to implement vertical crop.
+  // TODO(fbarchard): Ensure skip is a multiple of the maximum component
+  // subsample, i.e. 2.
+ int skip = (GetHeight() - dst_height) / 2;
+ if (skip > 0) {
+ // There is no API to skip lines in the output data, so we read them
+ // into the temp buffer.
+ while (skip >= GetImageScanlinesPerImcuRow()) {
+ if (!DecodeImcuRow()) {
+ FinishDecode();
+ return LIBYUV_FALSE;
+ }
+ skip -= GetImageScanlinesPerImcuRow();
+ }
+ if (skip > 0) {
+ // Have a partial iMCU row left over to skip. Must read it and then
+ // copy the parts we want into the destination.
+ if (!DecodeImcuRow()) {
+ FinishDecode();
+ return LIBYUV_FALSE;
+ }
+ for (int i = 0; i < num_outbufs_; ++i) {
+ // TODO(fbarchard): Compute skip to avoid this
+ assert(skip % GetVertSubSampFactor(i) == 0);
+ int rows_to_skip =
+ DivideAndRoundDown(skip, GetVertSubSampFactor(i));
+ int scanlines_to_copy = GetComponentScanlinesPerImcuRow(i) -
+ rows_to_skip;
+ int data_to_skip = rows_to_skip * GetComponentStride(i);
+ CopyPlane(databuf_[i] + data_to_skip, GetComponentStride(i),
+ planes[i], GetComponentWidth(i),
+ GetComponentWidth(i), scanlines_to_copy);
+ planes[i] += scanlines_to_copy * GetComponentWidth(i);
+ }
+ lines_left -= (GetImageScanlinesPerImcuRow() - skip);
+ }
+ }
+
+  // Read full iMCU rows, copying only the component width (horizontal crop).
+ for (; lines_left > GetImageScanlinesPerImcuRow();
+ lines_left -= GetImageScanlinesPerImcuRow()) {
+ if (!DecodeImcuRow()) {
+ FinishDecode();
+ return LIBYUV_FALSE;
+ }
+ for (int i = 0; i < num_outbufs_; ++i) {
+ int scanlines_to_copy = GetComponentScanlinesPerImcuRow(i);
+ CopyPlane(databuf_[i], GetComponentStride(i),
+ planes[i], GetComponentWidth(i),
+ GetComponentWidth(i), scanlines_to_copy);
+ planes[i] += scanlines_to_copy * GetComponentWidth(i);
+ }
+ }
+
+ if (lines_left > 0) {
+ // Have a partial iMCU row left over to decode.
+ if (!DecodeImcuRow()) {
+ FinishDecode();
+ return LIBYUV_FALSE;
+ }
+ for (int i = 0; i < num_outbufs_; ++i) {
+ int scanlines_to_copy =
+ DivideAndRoundUp(lines_left, GetVertSubSampFactor(i));
+ CopyPlane(databuf_[i], GetComponentStride(i),
+ planes[i], GetComponentWidth(i),
+ GetComponentWidth(i), scanlines_to_copy);
+ planes[i] += scanlines_to_copy * GetComponentWidth(i);
+ }
+ }
+ return FinishDecode();
+}
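+// Illustrative use (not part of this file), assuming caller-allocated planes:
+//   MJpegDecoder decoder;
+//   if (decoder.LoadFrame(jpg, jpg_size)) {
+//     uint8* planes[3] = { y_plane, u_plane, v_plane };
+//     decoder.DecodeToBuffers(planes, decoder.GetWidth(), decoder.GetHeight());
+//   }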
+
+LIBYUV_BOOL MJpegDecoder::DecodeToCallback(CallbackFunction fn, void* opaque,
+ int dst_width, int dst_height) {
+ if (dst_width != GetWidth() ||
+ dst_height > GetHeight()) {
+ // ERROR: Bad dimensions
+ return LIBYUV_FALSE;
+ }
+#ifdef HAVE_SETJMP
+ if (setjmp(error_mgr_->setjmp_buffer)) {
+ // We called into jpeglib, it experienced an error sometime during this
+ // function call, and we called longjmp() and rewound the stack to here.
+ // Return error.
+ return LIBYUV_FALSE;
+ }
+#endif
+ if (!StartDecode()) {
+ return LIBYUV_FALSE;
+ }
+ SetScanlinePointers(databuf_);
+ int lines_left = dst_height;
+  // Compute the number of lines to skip to implement vertical crop.
+ int skip = (GetHeight() - dst_height) / 2;
+ if (skip > 0) {
+ while (skip >= GetImageScanlinesPerImcuRow()) {
+ if (!DecodeImcuRow()) {
+ FinishDecode();
+ return LIBYUV_FALSE;
+ }
+ skip -= GetImageScanlinesPerImcuRow();
+ }
+ if (skip > 0) {
+ // Have a partial iMCU row left over to skip.
+ if (!DecodeImcuRow()) {
+ FinishDecode();
+ return LIBYUV_FALSE;
+ }
+ for (int i = 0; i < num_outbufs_; ++i) {
+ // TODO(fbarchard): Compute skip to avoid this
+ assert(skip % GetVertSubSampFactor(i) == 0);
+ int rows_to_skip = DivideAndRoundDown(skip, GetVertSubSampFactor(i));
+ int data_to_skip = rows_to_skip * GetComponentStride(i);
+ // Change our own data buffer pointers so we can pass them to the
+ // callback.
+ databuf_[i] += data_to_skip;
+ }
+ int scanlines_to_copy = GetImageScanlinesPerImcuRow() - skip;
+ (*fn)(opaque, databuf_, databuf_strides_, scanlines_to_copy);
+ // Now change them back.
+ for (int i = 0; i < num_outbufs_; ++i) {
+ int rows_to_skip = DivideAndRoundDown(skip, GetVertSubSampFactor(i));
+ int data_to_skip = rows_to_skip * GetComponentStride(i);
+ databuf_[i] -= data_to_skip;
+ }
+ lines_left -= scanlines_to_copy;
+ }
+ }
+ // Read full MCUs until we get to the crop point.
+ for (; lines_left >= GetImageScanlinesPerImcuRow();
+ lines_left -= GetImageScanlinesPerImcuRow()) {
+ if (!DecodeImcuRow()) {
+ FinishDecode();
+ return LIBYUV_FALSE;
+ }
+ (*fn)(opaque, databuf_, databuf_strides_, GetImageScanlinesPerImcuRow());
+ }
+ if (lines_left > 0) {
+ // Have a partial iMCU row left over to decode.
+ if (!DecodeImcuRow()) {
+ FinishDecode();
+ return LIBYUV_FALSE;
+ }
+ (*fn)(opaque, databuf_, databuf_strides_, lines_left);
+ }
+ return FinishDecode();
+}
+
+void init_source(j_decompress_ptr cinfo) {
+ fill_input_buffer(cinfo);
+}
+
+boolean fill_input_buffer(j_decompress_ptr cinfo) {
+ BufferVector* buf_vec = reinterpret_cast<BufferVector*>(cinfo->client_data);
+ if (buf_vec->pos >= buf_vec->len) {
+ assert(0 && "No more data");
+ // ERROR: No more data
+ return FALSE;
+ }
+ cinfo->src->next_input_byte = buf_vec->buffers[buf_vec->pos].data;
+ cinfo->src->bytes_in_buffer = buf_vec->buffers[buf_vec->pos].len;
+ ++buf_vec->pos;
+ return TRUE;
+}
+
+void skip_input_data(j_decompress_ptr cinfo,
+ long num_bytes) { // NOLINT
+ cinfo->src->next_input_byte += num_bytes;
+}
+
+void term_source(j_decompress_ptr cinfo) {
+ // Nothing to do.
+}
+
+#ifdef HAVE_SETJMP
+void ErrorHandler(j_common_ptr cinfo) {
+  // This is called when a jpeglib command experiences an error. Unfortunately
+  // jpeglib's error handling model is not very flexible, because it expects
+  // the error handler not to return; that is, it wants the program to
+  // terminate. To recover from errors we use setjmp() as shown in their
+  // example. setjmp() is C's rough equivalent of the "call with current
+  // continuation" facility seen in some functional programming languages.
+  // A formatted message can be output, but it is unsafe for release builds.
+#ifdef DEBUG
+ char buf[JMSG_LENGTH_MAX];
+ (*cinfo->err->format_message)(cinfo, buf);
+ // ERROR: Error in jpeglib: buf
+#endif
+
+ SetJmpErrorMgr* mgr = reinterpret_cast<SetJmpErrorMgr*>(cinfo->err);
+ // This rewinds the call stack to the point of the corresponding setjmp()
+ // and causes it to return (for a second time) with value 1.
+ longjmp(mgr->setjmp_buffer, 1);
+}
+#endif
+
+void MJpegDecoder::AllocOutputBuffers(int num_outbufs) {
+ if (num_outbufs != num_outbufs_) {
+ // We could perhaps optimize this case to resize the output buffers without
+ // necessarily having to delete and recreate each one, but it's not worth
+ // it.
+ DestroyOutputBuffers();
+
+ scanlines_ = new uint8** [num_outbufs];
+ scanlines_sizes_ = new int[num_outbufs];
+ databuf_ = new uint8* [num_outbufs];
+ databuf_strides_ = new int[num_outbufs];
+
+ for (int i = 0; i < num_outbufs; ++i) {
+ scanlines_[i] = NULL;
+ scanlines_sizes_[i] = 0;
+ databuf_[i] = NULL;
+ databuf_strides_[i] = 0;
+ }
+
+ num_outbufs_ = num_outbufs;
+ }
+}
+
+void MJpegDecoder::DestroyOutputBuffers() {
+ for (int i = 0; i < num_outbufs_; ++i) {
+ delete [] scanlines_[i];
+ delete [] databuf_[i];
+ }
+ delete [] scanlines_;
+ delete [] databuf_;
+ delete [] scanlines_sizes_;
+ delete [] databuf_strides_;
+ scanlines_ = NULL;
+ databuf_ = NULL;
+ scanlines_sizes_ = NULL;
+ databuf_strides_ = NULL;
+ num_outbufs_ = 0;
+}
+
+// JDCT_IFAST and disabling block smoothing improve performance substantially.
+LIBYUV_BOOL MJpegDecoder::StartDecode() {
+ decompress_struct_->raw_data_out = TRUE;
+ decompress_struct_->dct_method = JDCT_IFAST; // JDCT_ISLOW is default
+ decompress_struct_->dither_mode = JDITHER_NONE;
+ // Not applicable to 'raw':
+ decompress_struct_->do_fancy_upsampling = (boolean)(LIBYUV_FALSE);
+ // Only for buffered mode:
+ decompress_struct_->enable_2pass_quant = (boolean)(LIBYUV_FALSE);
+ // Blocky but fast:
+ decompress_struct_->do_block_smoothing = (boolean)(LIBYUV_FALSE);
+
+ if (!jpeg_start_decompress(decompress_struct_)) {
+ // ERROR: Couldn't start JPEG decompressor";
+ return LIBYUV_FALSE;
+ }
+ return LIBYUV_TRUE;
+}
+
+LIBYUV_BOOL MJpegDecoder::FinishDecode() {
+ // jpeglib considers it an error if we finish without decoding the whole
+ // image, so we call "abort" rather than "finish".
+ jpeg_abort_decompress(decompress_struct_);
+ return LIBYUV_TRUE;
+}
+
+void MJpegDecoder::SetScanlinePointers(uint8** data) {
+ for (int i = 0; i < num_outbufs_; ++i) {
+ uint8* data_i = data[i];
+ for (int j = 0; j < scanlines_sizes_[i]; ++j) {
+ scanlines_[i][j] = data_i;
+ data_i += GetComponentStride(i);
+ }
+ }
+}
+
+inline LIBYUV_BOOL MJpegDecoder::DecodeImcuRow() {
+ return (unsigned int)(GetImageScanlinesPerImcuRow()) ==
+ jpeg_read_raw_data(decompress_struct_,
+ scanlines_,
+ GetImageScanlinesPerImcuRow());
+}
+
+// Helper function that recognizes the jpeg sub-sampling type.
+JpegSubsamplingType MJpegDecoder::JpegSubsamplingTypeHelper(
+ int* subsample_x, int* subsample_y, int number_of_components) {
+ if (number_of_components == 3) { // Color images.
+ if (subsample_x[0] == 1 && subsample_y[0] == 1 &&
+ subsample_x[1] == 2 && subsample_y[1] == 2 &&
+ subsample_x[2] == 2 && subsample_y[2] == 2) {
+ return kJpegYuv420;
+ } else if (subsample_x[0] == 1 && subsample_y[0] == 1 &&
+ subsample_x[1] == 2 && subsample_y[1] == 1 &&
+ subsample_x[2] == 2 && subsample_y[2] == 1) {
+ return kJpegYuv422;
+ } else if (subsample_x[0] == 1 && subsample_y[0] == 1 &&
+ subsample_x[1] == 1 && subsample_y[1] == 1 &&
+ subsample_x[2] == 1 && subsample_y[2] == 1) {
+ return kJpegYuv444;
+ }
+ } else if (number_of_components == 1) { // Grey-scale images.
+ if (subsample_x[0] == 1 && subsample_y[0] == 1) {
+ return kJpegYuv400;
+ }
+ }
+ return kJpegUnknown;
+}
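+// Illustrative: 4:2:0 samples chroma at half width and half height, so the
+// per-component factors are x = {1, 2, 2}, y = {1, 2, 2}; 4:2:2 halves width
+// only; 4:4:4 subsamples neither.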
+
+} // namespace libyuv
+#endif // HAVE_JPEG
+
diff --git a/media/libaom/src/third_party/libyuv/source/mjpeg_validate.cc b/media/libaom/src/third_party/libyuv/source/mjpeg_validate.cc
new file mode 100644
index 000000000..8edfbe1e7
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/mjpeg_validate.cc
@@ -0,0 +1,101 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/mjpeg_decoder.h"
+
+#include <string.h> // For memchr.
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Enable this to try scasb implementation.
+// #define ENABLE_SCASB 1
+
+#ifdef ENABLE_SCASB
+
+// Handles any byte count (multiple of 1).
+__declspec(naked)
+const uint8* ScanRow_ERMS(const uint8* src, uint32 val, int count) {
+ __asm {
+ mov edx, edi
+ mov edi, [esp + 4] // src
+ mov eax, [esp + 8] // val
+ mov ecx, [esp + 12] // count
+ repne scasb
+ jne sr99
+ mov eax, edi
+ sub eax, 1
+ mov edi, edx
+ ret
+
+ sr99:
+ mov eax, 0
+ mov edi, edx
+ ret
+ }
+}
+#endif
+
+// Helper function to scan for EOI marker.
+static LIBYUV_BOOL ScanEOI(const uint8* sample, size_t sample_size) {
+ const uint8* end = sample + sample_size - 1;
+ const uint8* it = sample;
+ for (;;) {
+#ifdef ENABLE_SCASB
+ it = ScanRow_ERMS(it, 0xff, end - it);
+#else
+ it = static_cast<const uint8*>(memchr(it, 0xff, end - it));
+#endif
+ if (it == NULL) {
+ break;
+ }
+ if (it[1] == 0xd9) {
+ return LIBYUV_TRUE; // Success: Valid jpeg.
+ }
+ ++it; // Skip over current 0xff.
+ }
+  // ERROR: Invalid jpeg: end code (EOI) not found in sample_size bytes.
+ return LIBYUV_FALSE;
+}
+
+// Helper function to validate the jpeg appears intact.
+LIBYUV_BOOL ValidateJpeg(const uint8* sample, size_t sample_size) {
+ const size_t kBackSearchSize = 1024;
+ if (sample_size < 64) {
+ // ERROR: Invalid jpeg size: sample_size
+ return LIBYUV_FALSE;
+ }
+ if (sample[0] != 0xff || sample[1] != 0xd8) { // Start Of Image
+ // ERROR: Invalid jpeg initial start code
+ return LIBYUV_FALSE;
+ }
+ // Step over SOI marker.
+ sample += 2;
+ sample_size -= 2;
+
+  // Look for the End Of Image (EOI) marker in the last kilobyte of the buffer.
+ if (sample_size > kBackSearchSize) {
+ if (ScanEOI(sample + sample_size - kBackSearchSize, kBackSearchSize)) {
+ return LIBYUV_TRUE; // Success: Valid jpeg.
+ }
+ // Reduce search size for forward search.
+ sample_size = sample_size - kBackSearchSize + 1;
+ }
+  return ScanEOI(sample, sample_size);
+}
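+// Illustrative: a well-formed stream starts with ff d8 (SOI) and contains
+// ff d9 (EOI); the last kilobyte is searched first since EOI is normally at
+// the very end of the buffer.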
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
diff --git a/media/libaom/src/third_party/libyuv/source/planar_functions.cc b/media/libaom/src/third_party/libyuv/source/planar_functions.cc
new file mode 100644
index 000000000..b96bd5020
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/planar_functions.cc
@@ -0,0 +1,2555 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/planar_functions.h"
+
+#include <string.h> // for memset()
+
+#include "libyuv/cpu_id.h"
+#ifdef HAVE_JPEG
+#include "libyuv/mjpeg_decoder.h"
+#endif
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Copy a plane of data
+LIBYUV_API
+void CopyPlane(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height) {
+ int y;
+ void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ dst_stride_y == width) {
+ width *= height;
+ height = 1;
+ src_stride_y = dst_stride_y = 0;
+ }
+ // Nothing to do.
+ if (src_y == dst_y && src_stride_y == dst_stride_y) {
+ return;
+ }
+#if defined(HAS_COPYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ CopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
+ }
+#endif
+#if defined(HAS_COPYROW_AVX)
+ if (TestCpuFlag(kCpuHasAVX)) {
+ CopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
+ }
+#endif
+#if defined(HAS_COPYROW_ERMS)
+ if (TestCpuFlag(kCpuHasERMS)) {
+ CopyRow = CopyRow_ERMS;
+ }
+#endif
+#if defined(HAS_COPYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ CopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
+ }
+#endif
+#if defined(HAS_COPYROW_MIPS)
+ if (TestCpuFlag(kCpuHasMIPS)) {
+ CopyRow = CopyRow_MIPS;
+ }
+#endif
+
+ // Copy plane
+ for (y = 0; y < height; ++y) {
+ CopyRow(src_y, dst_y, width);
+ src_y += src_stride_y;
+ dst_y += dst_stride_y;
+ }
+}
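+// Illustrative: when both strides equal the width, the coalescing above turns
+// the copy into a single CopyRow call over width * height bytes.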
+
+LIBYUV_API
+void CopyPlane_16(const uint16* src_y, int src_stride_y,
+ uint16* dst_y, int dst_stride_y,
+ int width, int height) {
+ int y;
+ void (*CopyRow)(const uint16* src, uint16* dst, int width) = CopyRow_16_C;
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ dst_stride_y == width) {
+ width *= height;
+ height = 1;
+ src_stride_y = dst_stride_y = 0;
+ }
+#if defined(HAS_COPYROW_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 32)) {
+ CopyRow = CopyRow_16_SSE2;
+ }
+#endif
+#if defined(HAS_COPYROW_16_ERMS)
+ if (TestCpuFlag(kCpuHasERMS)) {
+ CopyRow = CopyRow_16_ERMS;
+ }
+#endif
+#if defined(HAS_COPYROW_16_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 32)) {
+ CopyRow = CopyRow_16_NEON;
+ }
+#endif
+#if defined(HAS_COPYROW_16_MIPS)
+ if (TestCpuFlag(kCpuHasMIPS)) {
+ CopyRow = CopyRow_16_MIPS;
+ }
+#endif
+
+ // Copy plane
+ for (y = 0; y < height; ++y) {
+ CopyRow(src_y, dst_y, width);
+ src_y += src_stride_y;
+ dst_y += dst_stride_y;
+ }
+}
+
+// Copy I422.
+LIBYUV_API
+int I422Copy(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int halfwidth = (width + 1) >> 1;
+ if (!src_y || !src_u || !src_v ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_u = src_u + (height - 1) * src_stride_u;
+ src_v = src_v + (height - 1) * src_stride_v;
+ src_stride_y = -src_stride_y;
+ src_stride_u = -src_stride_u;
+ src_stride_v = -src_stride_v;
+ }
+ CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, height);
+ CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, height);
+ return 0;
+}
+
+// Copy I444.
+LIBYUV_API
+int I444Copy(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ if (!src_y || !src_u || !src_v ||
+ !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_u = src_u + (height - 1) * src_stride_u;
+ src_v = src_v + (height - 1) * src_stride_v;
+ src_stride_y = -src_stride_y;
+ src_stride_u = -src_stride_u;
+ src_stride_v = -src_stride_v;
+ }
+
+ CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ CopyPlane(src_u, src_stride_u, dst_u, dst_stride_u, width, height);
+ CopyPlane(src_v, src_stride_v, dst_v, dst_stride_v, width, height);
+ return 0;
+}
+
+// Copy I400.
+LIBYUV_API
+int I400ToI400(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height) {
+ if (!src_y || !dst_y || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_stride_y = -src_stride_y;
+ }
+ CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ return 0;
+}
+
+// Convert I420 to I400.
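+// The U and V plane arguments are accepted for API symmetry but are unused;
+// only the Y plane is copied.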
+LIBYUV_API
+int I420ToI400(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height) {
+ if (!src_y || !dst_y || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_stride_y = -src_stride_y;
+ }
+ CopyPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ return 0;
+}
+
+// Mirror a plane of data.
+void MirrorPlane(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height) {
+ int y;
+ void (*MirrorRow)(const uint8* src, uint8* dst, int width) = MirrorRow_C;
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_stride_y = -src_stride_y;
+ }
+#if defined(HAS_MIRRORROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ MirrorRow = MirrorRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ MirrorRow = MirrorRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_MIRRORROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ MirrorRow = MirrorRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ MirrorRow = MirrorRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_MIRRORROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ MirrorRow = MirrorRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ MirrorRow = MirrorRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_MIRRORROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ MirrorRow = MirrorRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ MirrorRow = MirrorRow_AVX2;
+ }
+ }
+#endif
+// TODO(fbarchard): Make the MIPS mirror path handle unaligned memory.
+#if defined(HAS_MIRRORROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(dst_y, 4) && IS_ALIGNED(dst_stride_y, 4)) {
+ MirrorRow = MirrorRow_MIPS_DSPR2;
+ }
+#endif
+
+ // Mirror plane
+ for (y = 0; y < height; ++y) {
+ MirrorRow(src_y, dst_y, width);
+ src_y += src_stride_y;
+ dst_y += dst_stride_y;
+ }
+}
+
+// Convert YUY2 to I422.
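+// YUY2 packs two pixels into four bytes as Y0 U Y1 V, so each output row has
+// full-width Y and half-width U and V (4:2:2).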
+LIBYUV_API
+int YUY2ToI422(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*YUY2ToUV422Row)(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) =
+ YUY2ToUV422Row_C;
+ void (*YUY2ToYRow)(const uint8* src_yuy2, uint8* dst_y, int pix) =
+ YUY2ToYRow_C;
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_yuy2 = src_yuy2 + (height - 1) * src_stride_yuy2;
+ src_stride_yuy2 = -src_stride_yuy2;
+ }
+ // Coalesce rows.
+ if (src_stride_yuy2 == width * 2 &&
+ dst_stride_y == width &&
+ dst_stride_u * 2 == width &&
+ dst_stride_v * 2 == width) {
+ width *= height;
+ height = 1;
+ src_stride_yuy2 = dst_stride_y = dst_stride_u = dst_stride_v = 0;
+ }
+#if defined(HAS_YUY2TOYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ YUY2ToUV422Row = YUY2ToUV422Row_Any_SSE2;
+ YUY2ToYRow = YUY2ToYRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ YUY2ToUV422Row = YUY2ToUV422Row_SSE2;
+ YUY2ToYRow = YUY2ToYRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_YUY2TOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ YUY2ToUV422Row = YUY2ToUV422Row_Any_AVX2;
+ YUY2ToYRow = YUY2ToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ YUY2ToUV422Row = YUY2ToUV422Row_AVX2;
+ YUY2ToYRow = YUY2ToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_YUY2TOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ YUY2ToYRow = YUY2ToYRow_Any_NEON;
+ if (width >= 16) {
+ YUY2ToUV422Row = YUY2ToUV422Row_Any_NEON;
+ }
+ if (IS_ALIGNED(width, 16)) {
+ YUY2ToYRow = YUY2ToYRow_NEON;
+ YUY2ToUV422Row = YUY2ToUV422Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ YUY2ToUV422Row(src_yuy2, dst_u, dst_v, width);
+ YUY2ToYRow(src_yuy2, dst_y, width);
+ src_yuy2 += src_stride_yuy2;
+ dst_y += dst_stride_y;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ return 0;
+}
+
+// Convert UYVY to I422.
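+// UYVY is the byte-swapped variant of YUY2: U Y0 V Y1 for each pixel pair.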
+LIBYUV_API
+int UYVYToI422(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int y;
+ void (*UYVYToUV422Row)(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) =
+ UYVYToUV422Row_C;
+ void (*UYVYToYRow)(const uint8* src_uyvy,
+ uint8* dst_y, int pix) = UYVYToYRow_C;
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_uyvy = src_uyvy + (height - 1) * src_stride_uyvy;
+ src_stride_uyvy = -src_stride_uyvy;
+ }
+ // Coalesce rows.
+ if (src_stride_uyvy == width * 2 &&
+ dst_stride_y == width &&
+ dst_stride_u * 2 == width &&
+ dst_stride_v * 2 == width) {
+ width *= height;
+ height = 1;
+ src_stride_uyvy = dst_stride_y = dst_stride_u = dst_stride_v = 0;
+ }
+#if defined(HAS_UYVYTOYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ UYVYToUV422Row = UYVYToUV422Row_Any_SSE2;
+ UYVYToYRow = UYVYToYRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ UYVYToUV422Row = UYVYToUV422Row_SSE2;
+ UYVYToYRow = UYVYToYRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_UYVYTOYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ UYVYToUV422Row = UYVYToUV422Row_Any_AVX2;
+ UYVYToYRow = UYVYToYRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ UYVYToUV422Row = UYVYToUV422Row_AVX2;
+ UYVYToYRow = UYVYToYRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_UYVYTOYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ UYVYToYRow = UYVYToYRow_Any_NEON;
+ if (width >= 16) {
+ UYVYToUV422Row = UYVYToUV422Row_Any_NEON;
+ }
+ if (IS_ALIGNED(width, 16)) {
+ UYVYToYRow = UYVYToYRow_NEON;
+ UYVYToUV422Row = UYVYToUV422Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ UYVYToUV422Row(src_uyvy, dst_u, dst_v, width);
+ UYVYToYRow(src_uyvy, dst_y, width);
+ src_uyvy += src_stride_uyvy;
+ dst_y += dst_stride_y;
+ dst_u += dst_stride_u;
+ dst_v += dst_stride_v;
+ }
+ return 0;
+}
+
+// Mirror I400 with optional flipping
+LIBYUV_API
+int I400Mirror(const uint8* src_y, int src_stride_y,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height) {
+ if (!src_y || !dst_y ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_stride_y = -src_stride_y;
+ }
+
+ MirrorPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ return 0;
+}
+
+// Mirror I420 with optional flipping
+LIBYUV_API
+int I420Mirror(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height) {
+ int halfwidth = (width + 1) >> 1;
+ int halfheight = (height + 1) >> 1;
+ if (!src_y || !src_u || !src_v || !dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ halfheight = (height + 1) >> 1;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_u = src_u + (halfheight - 1) * src_stride_u;
+ src_v = src_v + (halfheight - 1) * src_stride_v;
+ src_stride_y = -src_stride_y;
+ src_stride_u = -src_stride_u;
+ src_stride_v = -src_stride_v;
+ }
+
+ if (dst_y) {
+ MirrorPlane(src_y, src_stride_y, dst_y, dst_stride_y, width, height);
+ }
+ MirrorPlane(src_u, src_stride_u, dst_u, dst_stride_u, halfwidth, halfheight);
+ MirrorPlane(src_v, src_stride_v, dst_v, dst_stride_v, halfwidth, halfheight);
+ return 0;
+}
+
+// ARGB mirror.
+LIBYUV_API
+int ARGBMirror(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBMirrorRow)(const uint8* src, uint8* dst, int width) =
+ ARGBMirrorRow_C;
+ if (!src_argb || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+#if defined(HAS_ARGBMIRRORROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBMirrorRow = ARGBMirrorRow_Any_NEON;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBMirrorRow = ARGBMirrorRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBMIRRORROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBMirrorRow = ARGBMirrorRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBMirrorRow = ARGBMirrorRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBMIRRORROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBMirrorRow = ARGBMirrorRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBMirrorRow = ARGBMirrorRow_AVX2;
+ }
+ }
+#endif
+
+ // Mirror plane
+ for (y = 0; y < height; ++y) {
+ ARGBMirrorRow(src_argb, dst_argb, width);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Get a blender that is optimized for the CPU and pixel count.
+// As there are several blenders to choose from, the caller should try to use
+// the same blend function for all pixels if possible.
+LIBYUV_API
+ARGBBlendRow GetARGBBlend() {
+ void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width) = ARGBBlendRow_C;
+#if defined(HAS_ARGBBLENDROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBBlendRow = ARGBBlendRow_SSSE3;
+ return ARGBBlendRow;
+ }
+#endif
+#if defined(HAS_ARGBBLENDROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBBlendRow = ARGBBlendRow_SSE2;
+ }
+#endif
+#if defined(HAS_ARGBBLENDROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBBlendRow = ARGBBlendRow_NEON;
+ }
+#endif
+ return ARGBBlendRow;
+}
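+// Illustrative: cache the returned row function and apply it row by row;
+// the foreground is expected premultiplied (see ARGBAttenuate below).
+//   ARGBBlendRow blend = GetARGBBlend();
+//   blend(fg_row, bg_row, dst_row, width);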
+
+// Alpha Blend 2 ARGB images and store to destination.
+LIBYUV_API
+int ARGBBlend(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBBlendRow)(const uint8* src_argb, const uint8* src_argb1,
+ uint8* dst_argb, int width) = GetARGBBlend();
+ if (!src_argb0 || !src_argb1 || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb0 == width * 4 &&
+ src_stride_argb1 == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb0 = src_stride_argb1 = dst_stride_argb = 0;
+ }
+
+ for (y = 0; y < height; ++y) {
+ ARGBBlendRow(src_argb0, src_argb1, dst_argb, width);
+ src_argb0 += src_stride_argb0;
+ src_argb1 += src_stride_argb1;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Multiply 2 ARGB images and store to destination.
+LIBYUV_API
+int ARGBMultiply(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBMultiplyRow)(const uint8* src0, const uint8* src1, uint8* dst,
+ int width) = ARGBMultiplyRow_C;
+ if (!src_argb0 || !src_argb1 || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb0 == width * 4 &&
+ src_stride_argb1 == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb0 = src_stride_argb1 = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBMULTIPLYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBMultiplyRow = ARGBMultiplyRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBMultiplyRow = ARGBMultiplyRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBMULTIPLYROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBMultiplyRow = ARGBMultiplyRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBMultiplyRow = ARGBMultiplyRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBMULTIPLYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBMultiplyRow = ARGBMultiplyRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBMultiplyRow = ARGBMultiplyRow_NEON;
+ }
+ }
+#endif
+
+ // Multiply plane
+ for (y = 0; y < height; ++y) {
+ ARGBMultiplyRow(src_argb0, src_argb1, dst_argb, width);
+ src_argb0 += src_stride_argb0;
+ src_argb1 += src_stride_argb1;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Add 2 ARGB images and store to destination.
+LIBYUV_API
+int ARGBAdd(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBAddRow)(const uint8* src0, const uint8* src1, uint8* dst,
+ int width) = ARGBAddRow_C;
+ if (!src_argb0 || !src_argb1 || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb0 == width * 4 &&
+ src_stride_argb1 == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb0 = src_stride_argb1 = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBADDROW_SSE2) && (defined(_MSC_VER) && !defined(__clang__))
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBAddRow = ARGBAddRow_SSE2;
+ }
+#endif
+#if defined(HAS_ARGBADDROW_SSE2) && !(defined(_MSC_VER) && !defined(__clang__))
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBAddRow = ARGBAddRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBAddRow = ARGBAddRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBADDROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBAddRow = ARGBAddRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAddRow = ARGBAddRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBADDROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBAddRow = ARGBAddRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAddRow = ARGBAddRow_NEON;
+ }
+ }
+#endif
+
+ // Add plane
+ for (y = 0; y < height; ++y) {
+ ARGBAddRow(src_argb0, src_argb1, dst_argb, width);
+ src_argb0 += src_stride_argb0;
+ src_argb1 += src_stride_argb1;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Subtract 2 ARGB images and store to destination.
+LIBYUV_API
+int ARGBSubtract(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBSubtractRow)(const uint8* src0, const uint8* src1, uint8* dst,
+ int width) = ARGBSubtractRow_C;
+ if (!src_argb0 || !src_argb1 || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb0 == width * 4 &&
+ src_stride_argb1 == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb0 = src_stride_argb1 = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBSUBTRACTROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBSubtractRow = ARGBSubtractRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBSubtractRow = ARGBSubtractRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBSUBTRACTROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBSubtractRow = ARGBSubtractRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBSubtractRow = ARGBSubtractRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBSUBTRACTROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBSubtractRow = ARGBSubtractRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBSubtractRow = ARGBSubtractRow_NEON;
+ }
+ }
+#endif
+
+ // Subtract plane
+ for (y = 0; y < height; ++y) {
+ ARGBSubtractRow(src_argb0, src_argb1, dst_argb, width);
+ src_argb0 += src_stride_argb0;
+ src_argb1 += src_stride_argb1;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert I422 to BGRA.
+LIBYUV_API
+int I422ToBGRA(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_bgra, int dst_stride_bgra,
+ int width, int height) {
+ int y;
+ void (*I422ToBGRARow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToBGRARow_C;
+ if (!src_y || !src_u || !src_v ||
+ !dst_bgra ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_bgra = dst_bgra + (height - 1) * dst_stride_bgra;
+ dst_stride_bgra = -dst_stride_bgra;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ src_stride_u * 2 == width &&
+ src_stride_v * 2 == width &&
+ dst_stride_bgra == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_bgra = 0;
+ }
+#if defined(HAS_I422TOBGRAROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToBGRARow = I422ToBGRARow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToBGRARow = I422ToBGRARow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOBGRAROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToBGRARow = I422ToBGRARow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToBGRARow = I422ToBGRARow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOBGRAROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToBGRARow = I422ToBGRARow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToBGRARow = I422ToBGRARow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_I422TOBGRAROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 4) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
+ IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
+ IS_ALIGNED(dst_bgra, 4) && IS_ALIGNED(dst_stride_bgra, 4)) {
+ I422ToBGRARow = I422ToBGRARow_MIPS_DSPR2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToBGRARow(src_y, src_u, src_v, dst_bgra, width);
+ dst_bgra += dst_stride_bgra;
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ return 0;
+}
+
+// Convert I422 to ABGR.
+LIBYUV_API
+int I422ToABGR(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_abgr, int dst_stride_abgr,
+ int width, int height) {
+ int y;
+ void (*I422ToABGRRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToABGRRow_C;
+ if (!src_y || !src_u || !src_v ||
+ !dst_abgr ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_abgr = dst_abgr + (height - 1) * dst_stride_abgr;
+ dst_stride_abgr = -dst_stride_abgr;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ src_stride_u * 2 == width &&
+ src_stride_v * 2 == width &&
+ dst_stride_abgr == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_abgr = 0;
+ }
+#if defined(HAS_I422TOABGRROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
+ I422ToABGRRow = I422ToABGRRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToABGRRow = I422ToABGRRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_I422TOABGRROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToABGRRow = I422ToABGRRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToABGRRow = I422ToABGRRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOABGRROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToABGRRow = I422ToABGRRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToABGRRow = I422ToABGRRow_AVX2;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToABGRRow(src_y, src_u, src_v, dst_abgr, width);
+ dst_abgr += dst_stride_abgr;
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ return 0;
+}
+
+// Convert I422 to RGBA.
+LIBYUV_API
+int I422ToRGBA(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_rgba, int dst_stride_rgba,
+ int width, int height) {
+ int y;
+ void (*I422ToRGBARow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToRGBARow_C;
+ if (!src_y || !src_u || !src_v ||
+ !dst_rgba ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_rgba = dst_rgba + (height - 1) * dst_stride_rgba;
+ dst_stride_rgba = -dst_stride_rgba;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ src_stride_u * 2 == width &&
+ src_stride_v * 2 == width &&
+ dst_stride_rgba == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = src_stride_u = src_stride_v = dst_stride_rgba = 0;
+ }
+#if defined(HAS_I422TORGBAROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && width >= 8) {
+ I422ToRGBARow = I422ToRGBARow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRGBARow = I422ToRGBARow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_I422TORGBAROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToRGBARow = I422ToRGBARow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ I422ToRGBARow = I422ToRGBARow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TORGBAROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToRGBARow = I422ToRGBARow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ I422ToRGBARow = I422ToRGBARow_AVX2;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ I422ToRGBARow(src_y, src_u, src_v, dst_rgba, width);
+ dst_rgba += dst_stride_rgba;
+ src_y += src_stride_y;
+ src_u += src_stride_u;
+ src_v += src_stride_v;
+ }
+ return 0;
+}
+
+// Convert NV12 to RGB565.
+LIBYUV_API
+int NV12ToRGB565(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ int width, int height) {
+ int y;
+ void (*NV12ToRGB565Row)(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* rgb_buf,
+ int width) = NV12ToRGB565Row_C;
+ if (!src_y || !src_uv || !dst_rgb565 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_rgb565 = dst_rgb565 + (height - 1) * dst_stride_rgb565;
+ dst_stride_rgb565 = -dst_stride_rgb565;
+ }
+#if defined(HAS_NV12TORGB565ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ NV12ToRGB565Row = NV12ToRGB565Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ NV12ToRGB565Row = NV12ToRGB565Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_NV12TORGB565ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ NV12ToRGB565Row = NV12ToRGB565Row_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ NV12ToRGB565Row = NV12ToRGB565Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_NV12TORGB565ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ NV12ToRGB565Row = NV12ToRGB565Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ NV12ToRGB565Row = NV12ToRGB565Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ NV12ToRGB565Row(src_y, src_uv, dst_rgb565, width);
+ dst_rgb565 += dst_stride_rgb565;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_uv += src_stride_uv;
+ }
+ }
+ return 0;
+}
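+// Note src_uv advances only after odd rows: NV12 chroma is 4:2:0, with one
+// interleaved UV row shared by every two Y rows.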
+
+// Convert NV21 to RGB565.
+LIBYUV_API
+int NV21ToRGB565(const uint8* src_y, int src_stride_y,
+ const uint8* src_vu, int src_stride_vu,
+ uint8* dst_rgb565, int dst_stride_rgb565,
+ int width, int height) {
+ int y;
+ void (*NV21ToRGB565Row)(const uint8* y_buf,
+ const uint8* src_vu,
+ uint8* rgb_buf,
+ int width) = NV21ToRGB565Row_C;
+ if (!src_y || !src_vu || !dst_rgb565 ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_rgb565 = dst_rgb565 + (height - 1) * dst_stride_rgb565;
+ dst_stride_rgb565 = -dst_stride_rgb565;
+ }
+#if defined(HAS_NV21TORGB565ROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ NV21ToRGB565Row = NV21ToRGB565Row_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ NV21ToRGB565Row = NV21ToRGB565Row_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_NV21TORGB565ROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ NV21ToRGB565Row = NV21ToRGB565Row_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ NV21ToRGB565Row = NV21ToRGB565Row_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_NV21TORGB565ROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ NV21ToRGB565Row = NV21ToRGB565Row_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ NV21ToRGB565Row = NV21ToRGB565Row_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ NV21ToRGB565Row(src_y, src_vu, dst_rgb565, width);
+ dst_rgb565 += dst_stride_rgb565;
+ src_y += src_stride_y;
+ if (y & 1) {
+ src_vu += src_stride_vu;
+ }
+ }
+ return 0;
+}
+
+LIBYUV_API
+void SetPlane(uint8* dst_y, int dst_stride_y,
+ int width, int height,
+ uint32 value) {
+ int y;
+ void (*SetRow)(uint8* dst, uint8 value, int pix) = SetRow_C;
+ if (height < 0) {
+ height = -height;
+ dst_y = dst_y + (height - 1) * dst_stride_y;
+ dst_stride_y = -dst_stride_y;
+ }
+ // Coalesce rows.
+ if (dst_stride_y == width) {
+ width *= height;
+ height = 1;
+ dst_stride_y = 0;
+ }
+#if defined(HAS_SETROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SetRow = SetRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ SetRow = SetRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_SETROW_X86)
+ if (TestCpuFlag(kCpuHasX86)) {
+ SetRow = SetRow_Any_X86;
+ if (IS_ALIGNED(width, 4)) {
+ SetRow = SetRow_X86;
+ }
+ }
+#endif
+#if defined(HAS_SETROW_ERMS)
+ if (TestCpuFlag(kCpuHasERMS)) {
+ SetRow = SetRow_ERMS;
+ }
+#endif
+
+ // Set plane
+ for (y = 0; y < height; ++y) {
+ SetRow(dst_y, value, width);
+ dst_y += dst_stride_y;
+ }
+}
+
+// Draw a rectangle into I420
+LIBYUV_API
+int I420Rect(uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int x, int y,
+ int width, int height,
+ int value_y, int value_u, int value_v) {
+ int halfwidth = (width + 1) >> 1;
+ int halfheight = (height + 1) >> 1;
+ uint8* start_y = dst_y + y * dst_stride_y + x;
+ uint8* start_u = dst_u + (y / 2) * dst_stride_u + (x / 2);
+ uint8* start_v = dst_v + (y / 2) * dst_stride_v + (x / 2);
+ if (!dst_y || !dst_u || !dst_v ||
+ width <= 0 || height == 0 ||
+ x < 0 || y < 0 ||
+ value_y < 0 || value_y > 255 ||
+ value_u < 0 || value_u > 255 ||
+ value_v < 0 || value_v > 255) {
+ return -1;
+ }
+
+ SetPlane(start_y, dst_stride_y, width, height, value_y);
+ SetPlane(start_u, dst_stride_u, halfwidth, halfheight, value_u);
+ SetPlane(start_v, dst_stride_v, halfwidth, halfheight, value_v);
+ return 0;
+}
+
+// Draw a rectangle into ARGB
+LIBYUV_API
+int ARGBRect(uint8* dst_argb, int dst_stride_argb,
+ int dst_x, int dst_y,
+ int width, int height,
+ uint32 value) {
+ int y;
+ void (*ARGBSetRow)(uint8* dst_argb, uint32 value, int pix) = ARGBSetRow_C;
+ if (!dst_argb ||
+ width <= 0 || height == 0 ||
+ dst_x < 0 || dst_y < 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ dst_argb += dst_y * dst_stride_argb + dst_x * 4;
+ // Coalesce rows.
+ if (dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ dst_stride_argb = 0;
+ }
+
+#if defined(HAS_ARGBSETROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBSetRow = ARGBSetRow_Any_NEON;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBSetRow = ARGBSetRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBSETROW_X86)
+ if (TestCpuFlag(kCpuHasX86)) {
+ ARGBSetRow = ARGBSetRow_X86;
+ }
+#endif
+
+ // Set plane
+ for (y = 0; y < height; ++y) {
+ ARGBSetRow(dst_argb, value, width);
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert unattenuated ARGB to preattenuated ARGB.
+// An unattenuated ARGB alpha blend uses the formula
+// p = a * f + (1 - a) * b
+// where
+// p is output pixel
+// f is foreground pixel
+// b is background pixel
+// a is alpha value from foreground pixel
+// A preattenuated ARGB alpha blend uses the formula
+// p = f + (1 - a) * b
+// where
+// f is foreground pixel premultiplied by alpha
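+// For example (illustrative): a foreground channel f = 200 with a = 128
+// attenuates to roughly 200 * 128 / 255 ~= 100, so the preattenuated blend
+// costs one multiply per channel instead of two.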
+
+LIBYUV_API
+int ARGBAttenuate(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBAttenuateRow)(const uint8* src_argb, uint8* dst_argb,
+ int width) = ARGBAttenuateRow_C;
+ if (!src_argb || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBATTENUATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBATTENUATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBAttenuateRow = ARGBAttenuateRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBAttenuateRow(src_argb, dst_argb, width);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert preattenuated ARGB to unattenuated ARGB.
+LIBYUV_API
+int ARGBUnattenuate(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBUnattenuateRow)(const uint8* src_argb, uint8* dst_argb,
+ int width) = ARGBUnattenuateRow_C;
+ if (!src_argb || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBUNATTENUATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBUnattenuateRow = ARGBUnattenuateRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBUnattenuateRow = ARGBUnattenuateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBUNATTENUATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBUnattenuateRow = ARGBUnattenuateRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBUnattenuateRow = ARGBUnattenuateRow_AVX2;
+ }
+ }
+#endif
+// TODO(fbarchard): Neon version.
+
+ for (y = 0; y < height; ++y) {
+ ARGBUnattenuateRow(src_argb, dst_argb, width);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Convert ARGB to grayscale ARGB.
+LIBYUV_API
+int ARGBGrayTo(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBGrayRow)(const uint8* src_argb, uint8* dst_argb,
+ int width) = ARGBGrayRow_C;
+ if (!src_argb || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBGRAYROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 8)) {
+ ARGBGrayRow = ARGBGrayRow_SSSE3;
+ }
+#endif
+#if defined(HAS_ARGBGRAYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
+ ARGBGrayRow = ARGBGrayRow_NEON;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBGrayRow(src_argb, dst_argb, width);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Make a rectangle of ARGB grayscale.
+LIBYUV_API
+int ARGBGray(uint8* dst_argb, int dst_stride_argb,
+ int dst_x, int dst_y,
+ int width, int height) {
+ int y;
+ void (*ARGBGrayRow)(const uint8* src_argb, uint8* dst_argb,
+ int width) = ARGBGrayRow_C;
+ uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
+ if (!dst_argb || width <= 0 || height <= 0 || dst_x < 0 || dst_y < 0) {
+ return -1;
+ }
+ // Coalesce rows.
+ if (dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBGRAYROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 8)) {
+ ARGBGrayRow = ARGBGrayRow_SSSE3;
+ }
+#endif
+#if defined(HAS_ARGBGRAYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
+ ARGBGrayRow = ARGBGrayRow_NEON;
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ ARGBGrayRow(dst, dst, width);
+ dst += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Make a rectangle of ARGB Sepia tone.
+LIBYUV_API
+int ARGBSepia(uint8* dst_argb, int dst_stride_argb,
+ int dst_x, int dst_y, int width, int height) {
+ int y;
+ void (*ARGBSepiaRow)(uint8* dst_argb, int width) = ARGBSepiaRow_C;
+ uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
+ if (!dst_argb || width <= 0 || height <= 0 || dst_x < 0 || dst_y < 0) {
+ return -1;
+ }
+ // Coalesce rows.
+ if (dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBSEPIAROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 8)) {
+ ARGBSepiaRow = ARGBSepiaRow_SSSE3;
+ }
+#endif
+#if defined(HAS_ARGBSEPIAROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
+ ARGBSepiaRow = ARGBSepiaRow_NEON;
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ ARGBSepiaRow(dst, width);
+ dst += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Apply a 4x4 matrix to each ARGB pixel.
+// Note: Normally used for shading, but can be used to swizzle or invert.
+LIBYUV_API
+int ARGBColorMatrix(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ const int8* matrix_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBColorMatrixRow)(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width) = ARGBColorMatrixRow_C;
+ if (!src_argb || !dst_argb || !matrix_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBCOLORMATRIXROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 8)) {
+ ARGBColorMatrixRow = ARGBColorMatrixRow_SSSE3;
+ }
+#endif
+#if defined(HAS_ARGBCOLORMATRIXROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
+ ARGBColorMatrixRow = ARGBColorMatrixRow_NEON;
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ ARGBColorMatrixRow(src_argb, dst_argb, matrix_argb, width);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Apply a 4x3 matrix to each ARGB pixel.
+// Deprecated.
+LIBYUV_API
+int RGBColorMatrix(uint8* dst_argb, int dst_stride_argb,
+ const int8* matrix_rgb,
+ int dst_x, int dst_y, int width, int height) {
+ SIMD_ALIGNED(int8 matrix_argb[16]);
+ uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
+ if (!dst_argb || !matrix_rgb || width <= 0 || height <= 0 ||
+ dst_x < 0 || dst_y < 0) {
+ return -1;
+ }
+
+ // Convert the 4x3 7-bit matrix to a 4x4 6-bit matrix.
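+ // (Illustrative: 7-bit fixed point has 1.0 = 128; halving each coefficient
+ // gives the 6-bit form with 1.0 = 64, matching matrix_argb[15] below.)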
+ matrix_argb[0] = matrix_rgb[0] / 2;
+ matrix_argb[1] = matrix_rgb[1] / 2;
+ matrix_argb[2] = matrix_rgb[2] / 2;
+ matrix_argb[3] = matrix_rgb[3] / 2;
+ matrix_argb[4] = matrix_rgb[4] / 2;
+ matrix_argb[5] = matrix_rgb[5] / 2;
+ matrix_argb[6] = matrix_rgb[6] / 2;
+ matrix_argb[7] = matrix_rgb[7] / 2;
+ matrix_argb[8] = matrix_rgb[8] / 2;
+ matrix_argb[9] = matrix_rgb[9] / 2;
+ matrix_argb[10] = matrix_rgb[10] / 2;
+ matrix_argb[11] = matrix_rgb[11] / 2;
+ matrix_argb[14] = matrix_argb[13] = matrix_argb[12] = 0;
+ matrix_argb[15] = 64; // 1.0
+
+ return ARGBColorMatrix((const uint8*)(dst), dst_stride_argb,
+ dst, dst_stride_argb,
+ &matrix_argb[0], width, height);
+}
+
+// Apply a color table to each ARGB pixel.
+// Table contains 256 ARGB values.
+LIBYUV_API
+int ARGBColorTable(uint8* dst_argb, int dst_stride_argb,
+ const uint8* table_argb,
+ int dst_x, int dst_y, int width, int height) {
+ int y;
+ void (*ARGBColorTableRow)(uint8* dst_argb, const uint8* table_argb,
+ int width) = ARGBColorTableRow_C;
+ uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
+ if (!dst_argb || !table_argb || width <= 0 || height <= 0 ||
+ dst_x < 0 || dst_y < 0) {
+ return -1;
+ }
+ // Coalesce rows.
+ if (dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBCOLORTABLEROW_X86)
+ if (TestCpuFlag(kCpuHasX86)) {
+ ARGBColorTableRow = ARGBColorTableRow_X86;
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ ARGBColorTableRow(dst, table_argb, width);
+ dst += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Apply a color table to each ARGB pixel but preserve destination alpha.
+// Table contains 256 ARGB values.
+LIBYUV_API
+int RGBColorTable(uint8* dst_argb, int dst_stride_argb,
+ const uint8* table_argb,
+ int dst_x, int dst_y, int width, int height) {
+ int y;
+ void (*RGBColorTableRow)(uint8* dst_argb, const uint8* table_argb,
+ int width) = RGBColorTableRow_C;
+ uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
+ if (!dst_argb || !table_argb || width <= 0 || height <= 0 ||
+ dst_x < 0 || dst_y < 0) {
+ return -1;
+ }
+ // Coalesce rows.
+ if (dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ dst_stride_argb = 0;
+ }
+#if defined(HAS_RGBCOLORTABLEROW_X86)
+ if (TestCpuFlag(kCpuHasX86)) {
+ RGBColorTableRow = RGBColorTableRow_X86;
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ RGBColorTableRow(dst, table_argb, width);
+ dst += dst_stride_argb;
+ }
+ return 0;
+}
+
+// ARGBQuantize is used to posterize art.
+// e.g. rgb / qvalue * qvalue + qvalue / 2
+// But the low level row functions implement it efficiently with 3 parameters,
+// and could be used for other high level operations.
+// dst_argb[0] = (b * scale >> 16) * interval_size + interval_offset;
+// where scale is 1 / interval_size as a fixed point value.
+// The divide is replaced with a multiply by a reciprocal fixed point value.
+// Caveat - although SSE2 saturates, the C function does not and should be used
+// with care if doing anything but quantization.
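+// Worked example (illustrative): interval_size = 32, interval_offset = 16
+// gives scale = 65536 / 32 = 2048, so a channel value of 200 maps to
+// (200 * 2048 >> 16) * 32 + 16 = 6 * 32 + 16 = 208, matching
+// 200 / 32 * 32 + 32 / 2 from the formula above.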
+LIBYUV_API
+int ARGBQuantize(uint8* dst_argb, int dst_stride_argb,
+ int scale, int interval_size, int interval_offset,
+ int dst_x, int dst_y, int width, int height) {
+ int y;
+ void (*ARGBQuantizeRow)(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width) = ARGBQuantizeRow_C;
+ uint8* dst = dst_argb + dst_y * dst_stride_argb + dst_x * 4;
+ if (!dst_argb || width <= 0 || height <= 0 || dst_x < 0 || dst_y < 0 ||
+ interval_size < 1 || interval_size > 255) {
+ return -1;
+ }
+ // Coalesce rows.
+ if (dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBQUANTIZEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 4)) {
+ ARGBQuantizeRow = ARGBQuantizeRow_SSE2;
+ }
+#endif
+#if defined(HAS_ARGBQUANTIZEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
+ ARGBQuantizeRow = ARGBQuantizeRow_NEON;
+ }
+#endif
+ for (y = 0; y < height; ++y) {
+ ARGBQuantizeRow(dst, scale, interval_size, interval_offset, width);
+ dst += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Computes a table of cumulative sums for an image, where each entry is the
+// sum of all values above and to the left of the entry. Used by ARGBBlur.
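+// With such a summed-area table the sum over any rectangle takes 4 lookups:
+// sum = C(x1, y1) - C(x0, y1) - C(x1, y0) + C(x0, y0) (coordinates here are
+// illustrative; this identity is what CumulativeSumToAverageRow relies on).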
+LIBYUV_API
+int ARGBComputeCumulativeSum(const uint8* src_argb, int src_stride_argb,
+ int32* dst_cumsum, int dst_stride32_cumsum,
+ int width, int height) {
+ int y;
+ void (*ComputeCumulativeSumRow)(const uint8* row, int32* cumsum,
+ const int32* previous_cumsum, int width) = ComputeCumulativeSumRow_C;
+ int32* previous_cumsum = dst_cumsum;
+ if (!dst_cumsum || !src_argb || width <= 0 || height <= 0) {
+ return -1;
+ }
+#if defined(HAS_CUMULATIVESUMTOAVERAGEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2;
+ }
+#endif
+ memset(dst_cumsum, 0, width * sizeof(dst_cumsum[0]) * 4); // 4 int per pixel.
+ for (y = 0; y < height; ++y) {
+ ComputeCumulativeSumRow(src_argb, dst_cumsum, previous_cumsum, width);
+ previous_cumsum = dst_cumsum;
+ dst_cumsum += dst_stride32_cumsum;
+ src_argb += src_stride_argb;
+ }
+ return 0;
+}
+
+// Blur ARGB image.
+// Caller should allocate a CumulativeSum table of width * height * 16 bytes,
+// aligned to a 16 byte boundary. height can be radius * 2 + 2 rows to save
+// memory, as the buffer is treated as circular.
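+// For example (illustrative): with radius = 5 the circular buffer needs only
+// radius * 2 + 2 = 12 rows of sums (width * 12 * 16 bytes), independent of
+// the image height.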
+LIBYUV_API
+int ARGBBlur(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int32* dst_cumsum, int dst_stride32_cumsum,
+ int width, int height, int radius) {
+ int y;
+ void (*ComputeCumulativeSumRow)(const uint8 *row, int32 *cumsum,
+ const int32* previous_cumsum, int width) = ComputeCumulativeSumRow_C;
+ void (*CumulativeSumToAverageRow)(const int32* topleft, const int32* botleft,
+ int width, int area, uint8* dst, int count) = CumulativeSumToAverageRow_C;
+ int32* cumsum_bot_row;
+ int32* max_cumsum_bot_row;
+ int32* cumsum_top_row;
+
+ if (!src_argb || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ if (radius > height) {
+ radius = height;
+ }
+ if (radius > (width / 2 - 1)) {
+ radius = width / 2 - 1;
+ }
+ if (radius <= 0) {
+ return -1;
+ }
+#if defined(HAS_CUMULATIVESUMTOAVERAGEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ComputeCumulativeSumRow = ComputeCumulativeSumRow_SSE2;
+ CumulativeSumToAverageRow = CumulativeSumToAverageRow_SSE2;
+ }
+#endif
+ // Compute enough CumulativeSum for first row to be blurred. After this
+ // one row of CumulativeSum is updated at a time.
+ ARGBComputeCumulativeSum(src_argb, src_stride_argb,
+ dst_cumsum, dst_stride32_cumsum,
+ width, radius);
+
+ src_argb = src_argb + radius * src_stride_argb;
+ cumsum_bot_row = &dst_cumsum[(radius - 1) * dst_stride32_cumsum];
+
+ max_cumsum_bot_row = &dst_cumsum[(radius * 2 + 2) * dst_stride32_cumsum];
+ cumsum_top_row = &dst_cumsum[0];
+
+ for (y = 0; y < height; ++y) {
+ int top_y = ((y - radius - 1) >= 0) ? (y - radius - 1) : 0;
+ int bot_y = ((y + radius) < height) ? (y + radius) : (height - 1);
+ int area = radius * (bot_y - top_y);
+ int boxwidth = radius * 4;
+ int x;
+ int n;
+
+ // Increment cumsum_top_row pointer with circular buffer wrap around.
+ if (top_y) {
+ cumsum_top_row += dst_stride32_cumsum;
+ if (cumsum_top_row >= max_cumsum_bot_row) {
+ cumsum_top_row = dst_cumsum;
+ }
+ }
+ // Increment cumsum_bot_row pointer with circular buffer wrap around and
+ // then fill in a row of CumulativeSum.
+ if ((y + radius) < height) {
+ const int32* prev_cumsum_bot_row = cumsum_bot_row;
+ cumsum_bot_row += dst_stride32_cumsum;
+ if (cumsum_bot_row >= max_cumsum_bot_row) {
+ cumsum_bot_row = dst_cumsum;
+ }
+ ComputeCumulativeSumRow(src_argb, cumsum_bot_row, prev_cumsum_bot_row,
+ width);
+ src_argb += src_stride_argb;
+ }
+
+ // Left clipped.
+ for (x = 0; x < radius + 1; ++x) {
+ CumulativeSumToAverageRow(cumsum_top_row, cumsum_bot_row,
+ boxwidth, area, &dst_argb[x * 4], 1);
+ area += (bot_y - top_y);
+ boxwidth += 4;
+ }
+
+ // Middle unclipped.
+ n = (width - 1) - radius - x + 1;
+ CumulativeSumToAverageRow(cumsum_top_row, cumsum_bot_row,
+ boxwidth, area, &dst_argb[x * 4], n);
+
+ // Right clipped.
+ for (x += n; x <= width - 1; ++x) {
+ area -= (bot_y - top_y);
+ boxwidth -= 4;
+ CumulativeSumToAverageRow(cumsum_top_row + (x - radius - 1) * 4,
+ cumsum_bot_row + (x - radius - 1) * 4,
+ boxwidth, area, &dst_argb[x * 4], 1);
+ }
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Multiply ARGB image by a specified ARGB value.
+LIBYUV_API
+int ARGBShade(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height, uint32 value) {
+ int y;
+ void (*ARGBShadeRow)(const uint8* src_argb, uint8* dst_argb,
+ int width, uint32 value) = ARGBShadeRow_C;
+ if (!src_argb || !dst_argb || width <= 0 || height == 0 || value == 0u) {
+ return -1;
+ }
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBSHADEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 4)) {
+ ARGBShadeRow = ARGBShadeRow_SSE2;
+ }
+#endif
+#if defined(HAS_ARGBSHADEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
+ ARGBShadeRow = ARGBShadeRow_NEON;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBShadeRow(src_argb, dst_argb, width, value);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Interpolate 2 ARGB images by specified amount (0 to 255).
+LIBYUV_API
+int ARGBInterpolate(const uint8* src_argb0, int src_stride_argb0,
+ const uint8* src_argb1, int src_stride_argb1,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height, int interpolation) {
+ int y;
+ void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) = InterpolateRow_C;
+ if (!src_argb0 || !src_argb1 || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ dst_argb = dst_argb + (height - 1) * dst_stride_argb;
+ dst_stride_argb = -dst_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb0 == width * 4 &&
+ src_stride_argb1 == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb0 = src_stride_argb1 = dst_stride_argb = 0;
+ }
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 4)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(width, 4)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(src_argb0, 4) && IS_ALIGNED(src_stride_argb0, 4) &&
+ IS_ALIGNED(src_argb1, 4) && IS_ALIGNED(src_stride_argb1, 4) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
+ InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ }
+#endif
+
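+ // The pointer difference is passed as the row "stride" so the row function
+ // reads the second image at src_ptr + src_stride.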
+ for (y = 0; y < height; ++y) {
+ InterpolateRow(dst_argb, src_argb0, src_argb1 - src_argb0,
+ width * 4, interpolation);
+ src_argb0 += src_stride_argb0;
+ src_argb1 += src_stride_argb1;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Shuffle ARGB channel order. e.g. BGRA to ARGB.
+LIBYUV_API
+int ARGBShuffle(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_argb, int dst_stride_argb,
+ const uint8* shuffler, int width, int height) {
+ int y;
+ void (*ARGBShuffleRow)(const uint8* src_bgra, uint8* dst_argb,
+ const uint8* shuffler, int pix) = ARGBShuffleRow_C;
+ if (!src_bgra || !dst_argb ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_bgra = src_bgra + (height - 1) * src_stride_bgra;
+ src_stride_bgra = -src_stride_bgra;
+ }
+ // Coalesce rows.
+ if (src_stride_bgra == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_bgra = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBSHUFFLEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBShuffleRow = ARGBShuffleRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBShuffleRow = ARGBShuffleRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBSHUFFLEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBShuffleRow = ARGBShuffleRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBShuffleRow = ARGBShuffleRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBSHUFFLEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBShuffleRow = ARGBShuffleRow_Any_AVX2;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBShuffleRow = ARGBShuffleRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBSHUFFLEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBShuffleRow = ARGBShuffleRow_Any_NEON;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBShuffleRow = ARGBShuffleRow_NEON;
+ }
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBShuffleRow(src_bgra, dst_argb, shuffler, width);
+ src_bgra += src_stride_bgra;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Shared Sobel implementation; the SobelRow parameter formats the output.
+static int ARGBSobelize(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height,
+ void (*SobelRow)(const uint8* src_sobelx,
+ const uint8* src_sobely,
+ uint8* dst, int width)) {
+ int y;
+ void (*ARGBToYJRow)(const uint8* src_argb, uint8* dst_g, int pix) =
+ ARGBToYJRow_C;
+ void (*SobelYRow)(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width) = SobelYRow_C;
+ void (*SobelXRow)(const uint8* src_y0, const uint8* src_y1,
+ const uint8* src_y2, uint8* dst_sobely, int width) =
+ SobelXRow_C;
+ const int kEdge = 16; // Extra pixels at start of row for extrude/align.
+ if (!src_argb || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+
+#if defined(HAS_ARGBTOYJROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ ARGBToYJRow = ARGBToYJRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ ARGBToYJRow = ARGBToYJRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYJROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBToYJRow = ARGBToYJRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ ARGBToYJRow = ARGBToYJRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBTOYJROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBToYJRow = ARGBToYJRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBToYJRow = ARGBToYJRow_NEON;
+ }
+ }
+#endif
+
+#if defined(HAS_SOBELYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ SobelYRow = SobelYRow_SSE2;
+ }
+#endif
+#if defined(HAS_SOBELYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SobelYRow = SobelYRow_NEON;
+ }
+#endif
+#if defined(HAS_SOBELXROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ SobelXRow = SobelXRow_SSE2;
+ }
+#endif
+#if defined(HAS_SOBELXROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SobelXRow = SobelXRow_NEON;
+ }
+#endif
+ {
+ // 3 rows with edges before/after.
+ const int kRowSize = (width + kEdge + 31) & ~31;
+ align_buffer_64(rows, kRowSize * 2 + (kEdge + kRowSize * 3 + kEdge));
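+ // Buffer layout: [sobelx row][sobely row][edge][3 luma rows][edge].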
+ uint8* row_sobelx = rows;
+ uint8* row_sobely = rows + kRowSize;
+ uint8* row_y = rows + kRowSize * 2;
+
+ // Convert first row.
+ uint8* row_y0 = row_y + kEdge;
+ uint8* row_y1 = row_y0 + kRowSize;
+ uint8* row_y2 = row_y1 + kRowSize;
+ ARGBToYJRow(src_argb, row_y0, width);
+ row_y0[-1] = row_y0[0];
+ memset(row_y0 + width, row_y0[width - 1], 16); // Extrude 16 for valgrind.
+ ARGBToYJRow(src_argb, row_y1, width);
+ row_y1[-1] = row_y1[0];
+ memset(row_y1 + width, row_y1[width - 1], 16);
+ memset(row_y2 + width, 0, 16);
+
+ for (y = 0; y < height; ++y) {
+ // Convert next row of ARGB to G.
+ if (y < (height - 1)) {
+ src_argb += src_stride_argb;
+ }
+ ARGBToYJRow(src_argb, row_y2, width);
+ row_y2[-1] = row_y2[0];
+ row_y2[width] = row_y2[width - 1];
+
+ SobelXRow(row_y0 - 1, row_y1 - 1, row_y2 - 1, row_sobelx, width);
+ SobelYRow(row_y0 - 1, row_y2 - 1, row_sobely, width);
+ SobelRow(row_sobelx, row_sobely, dst_argb, width);
+
+ // Cycle thru circular queue of 3 row_y buffers.
+ {
+ uint8* row_yt = row_y0;
+ row_y0 = row_y1;
+ row_y1 = row_y2;
+ row_y2 = row_yt;
+ }
+
+ dst_argb += dst_stride_argb;
+ }
+ free_aligned_buffer_64(rows);
+ }
+ return 0;
+}
+
+// Sobel ARGB effect.
+LIBYUV_API
+int ARGBSobel(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ void (*SobelRow)(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) = SobelRow_C;
+#if defined(HAS_SOBELROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ SobelRow = SobelRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ SobelRow = SobelRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SOBELROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SobelRow = SobelRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ SobelRow = SobelRow_NEON;
+ }
+ }
+#endif
+ return ARGBSobelize(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
+ width, height, SobelRow);
+}
+
+// Sobel ARGB effect with planar output.
+LIBYUV_API
+int ARGBSobelToPlane(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_y, int dst_stride_y,
+ int width, int height) {
+ void (*SobelToPlaneRow)(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_, int width) = SobelToPlaneRow_C;
+#if defined(HAS_SOBELTOPLANEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ SobelToPlaneRow = SobelToPlaneRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ SobelToPlaneRow = SobelToPlaneRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SOBELTOPLANEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SobelToPlaneRow = SobelToPlaneRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ SobelToPlaneRow = SobelToPlaneRow_NEON;
+ }
+ }
+#endif
+ return ARGBSobelize(src_argb, src_stride_argb, dst_y, dst_stride_y,
+ width, height, SobelToPlaneRow);
+}
+
+// SobelXY ARGB effect.
+// Similar to Sobel, but also stores Sobel X in R and Sobel Y in B. G = Sobel.
+LIBYUV_API
+int ARGBSobelXY(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ void (*SobelXYRow)(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) = SobelXYRow_C;
+#if defined(HAS_SOBELXYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ SobelXYRow = SobelXYRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ SobelXYRow = SobelXYRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SOBELXYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SobelXYRow = SobelXYRow_Any_NEON;
+ if (IS_ALIGNED(width, 8)) {
+ SobelXYRow = SobelXYRow_NEON;
+ }
+ }
+#endif
+ return ARGBSobelize(src_argb, src_stride_argb, dst_argb, dst_stride_argb,
+ width, height, SobelXYRow);
+}
+
+// Apply a 4x4 polynomial to each ARGB pixel.
+LIBYUV_API
+int ARGBPolynomial(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ const float* poly,
+ int width, int height) {
+ int y;
+ void (*ARGBPolynomialRow)(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width) = ARGBPolynomialRow_C;
+ if (!src_argb || !dst_argb || !poly || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBPOLYNOMIALROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 2)) {
+ ARGBPolynomialRow = ARGBPolynomialRow_SSE2;
+ }
+#endif
+#if defined(HAS_ARGBPOLYNOMIALROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2) && TestCpuFlag(kCpuHasFMA3) &&
+ IS_ALIGNED(width, 2)) {
+ ARGBPolynomialRow = ARGBPolynomialRow_AVX2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBPolynomialRow(src_argb, dst_argb, poly, width);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Apply a luma color table to each ARGB pixel.
+LIBYUV_API
+int ARGBLumaColorTable(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ const uint8* luma,
+ int width, int height) {
+ int y;
+ void (*ARGBLumaColorTableRow)(const uint8* src_argb, uint8* dst_argb,
+ int width, const uint8* luma, const uint32 lumacoeff) =
+ ARGBLumaColorTableRow_C;
+ if (!src_argb || !dst_argb || !luma || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBLUMACOLORTABLEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 4)) {
+ ARGBLumaColorTableRow = ARGBLumaColorTableRow_SSSE3;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBLumaColorTableRow(src_argb, dst_argb, width, luma, 0x00264b0f);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Copy Alpha from one ARGB image to another.
+LIBYUV_API
+int ARGBCopyAlpha(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBCopyAlphaRow)(const uint8* src_argb, uint8* dst_argb, int width) =
+ ARGBCopyAlphaRow_C;
+ if (!src_argb || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+ // Coalesce rows.
+ if (src_stride_argb == width * 4 &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_argb = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBCOPYALPHAROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 8)) {
+ ARGBCopyAlphaRow = ARGBCopyAlphaRow_SSE2;
+ }
+#endif
+#if defined(HAS_ARGBCOPYALPHAROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2) && IS_ALIGNED(width, 16)) {
+ ARGBCopyAlphaRow = ARGBCopyAlphaRow_AVX2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBCopyAlphaRow(src_argb, dst_argb, width);
+ src_argb += src_stride_argb;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+// Copy a planar Y channel to the alpha channel of a destination ARGB image.
+LIBYUV_API
+int ARGBCopyYToAlpha(const uint8* src_y, int src_stride_y,
+ uint8* dst_argb, int dst_stride_argb,
+ int width, int height) {
+ int y;
+ void (*ARGBCopyYToAlphaRow)(const uint8* src_y, uint8* dst_argb, int width) =
+ ARGBCopyYToAlphaRow_C;
+ if (!src_y || !dst_argb || width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_stride_y = -src_stride_y;
+ }
+ // Coalesce rows.
+ if (src_stride_y == width &&
+ dst_stride_argb == width * 4) {
+ width *= height;
+ height = 1;
+ src_stride_y = dst_stride_argb = 0;
+ }
+#if defined(HAS_ARGBCOPYYTOALPHAROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 8)) {
+ ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_SSE2;
+ }
+#endif
+#if defined(HAS_ARGBCOPYYTOALPHAROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2) && IS_ALIGNED(width, 16)) {
+ ARGBCopyYToAlphaRow = ARGBCopyYToAlphaRow_AVX2;
+ }
+#endif
+
+ for (y = 0; y < height; ++y) {
+ ARGBCopyYToAlphaRow(src_y, dst_argb, width);
+ src_y += src_stride_y;
+ dst_argb += dst_stride_argb;
+ }
+ return 0;
+}
+
+LIBYUV_API
+int YUY2ToNV12(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height) {
+ int y;
+ int halfwidth = (width + 1) >> 1;
+ void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) =
+ SplitUVRow_C;
+ void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) = InterpolateRow_C;
+ if (!src_yuy2 ||
+ !dst_y || !dst_uv ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_yuy2 = src_yuy2 + (height - 1) * src_stride_yuy2;
+ src_stride_yuy2 = -src_stride_yuy2;
+ }
+#if defined(HAS_SPLITUVROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ SplitUVRow = SplitUVRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ SplitUVRow = SplitUVRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SPLITUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ SplitUVRow = SplitUVRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ SplitUVRow = SplitUVRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_SPLITUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SplitUVRow = SplitUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ SplitUVRow = SplitUVRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+
+ {
+ int awidth = halfwidth * 2;
+ // 2 rows of uv
+ align_buffer_64(rows, awidth * 2);
+
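+ // Each pass splits 2 rows of YUY2 into Y and interleaved UV, then
+ // averages the 2 UV rows with InterpolateRow(..., 128) into one
+ // 4:2:0 UV row.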
+ for (y = 0; y < height - 1; y += 2) {
+ // Split Y from UV.
+ SplitUVRow(src_yuy2, dst_y, rows, awidth);
+ SplitUVRow(src_yuy2 + src_stride_yuy2, dst_y + dst_stride_y,
+ rows + awidth, awidth);
+ InterpolateRow(dst_uv, rows, awidth, awidth, 128);
+ src_yuy2 += src_stride_yuy2 * 2;
+ dst_y += dst_stride_y * 2;
+ dst_uv += dst_stride_uv;
+ }
+ if (height & 1) {
+ // Split Y from UV.
+ SplitUVRow(src_yuy2, dst_y, dst_uv, width);
+ }
+ free_aligned_buffer_64(rows);
+ }
+ return 0;
+}
+
+LIBYUV_API
+int UYVYToNV12(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_uv, int dst_stride_uv,
+ int width, int height) {
+ int y;
+ int halfwidth = (width + 1) >> 1;
+ void (*SplitUVRow)(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) =
+ SplitUVRow_C;
+ void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) = InterpolateRow_C;
+ if (!src_uyvy ||
+ !dst_y || !dst_uv ||
+ width <= 0 || height == 0) {
+ return -1;
+ }
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_uyvy = src_uyvy + (height - 1) * src_stride_uyvy;
+ src_stride_uyvy = -src_stride_uyvy;
+ }
+#if defined(HAS_SPLITUVROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ SplitUVRow = SplitUVRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ SplitUVRow = SplitUVRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SPLITUVROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ SplitUVRow = SplitUVRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ SplitUVRow = SplitUVRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_SPLITUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ SplitUVRow = SplitUVRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ SplitUVRow = SplitUVRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+
+ {
+ int awidth = halfwidth * 2;
+ // 2 rows of uv
+ align_buffer_64(rows, awidth * 2);
+
+ for (y = 0; y < height - 1; y += 2) {
+ // Split Y from UV.
+ SplitUVRow(src_uyvy, rows, dst_y, awidth);
+ SplitUVRow(src_uyvy + src_stride_uyvy, rows + awidth,
+ dst_y + dst_stride_y, awidth);
+ InterpolateRow(dst_uv, rows, awidth, awidth, 128);
+ src_uyvy += src_stride_uyvy * 2;
+ dst_y += dst_stride_y * 2;
+ dst_uv += dst_stride_uv;
+ }
+ if (height & 1) {
+ // Split Y from UV.
+ SplitUVRow(src_uyvy, dst_y, dst_uv, width);
+ }
+ free_aligned_buffer_64(rows);
+ }
+ return 0;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/rotate.cc b/media/libaom/src/third_party/libyuv/source/rotate.cc
new file mode 100644
index 000000000..be3d58920
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate.cc
@@ -0,0 +1,496 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/rotate.h"
+
+#include "libyuv/cpu_id.h"
+#include "libyuv/convert.h"
+#include "libyuv/planar_functions.h"
+#include "libyuv/rotate_row.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+LIBYUV_API
+void TransposePlane(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height) {
+ int i = height;
+ void (*TransposeWx8)(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width) = TransposeWx8_C;
+#if defined(HAS_TRANSPOSEWX8_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ TransposeWx8 = TransposeWx8_NEON;
+ }
+#endif
+#if defined(HAS_TRANSPOSEWX8_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ TransposeWx8 = TransposeWx8_Any_SSSE3;
+ if (IS_ALIGNED(width, 8)) {
+ TransposeWx8 = TransposeWx8_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_TRANSPOSEWX8_FAST_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ TransposeWx8 = TransposeWx8_Fast_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ TransposeWx8 = TransposeWx8_Fast_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_TRANSPOSEWX8_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
+ if (IS_ALIGNED(width, 4) &&
+ IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
+ TransposeWx8 = TransposeWx8_Fast_MIPS_DSPR2;
+ } else {
+ TransposeWx8 = TransposeWx8_MIPS_DSPR2;
+ }
+ }
+#endif
+
+ // Work across the source in 8x8 tiles
+ while (i >= 8) {
+ TransposeWx8(src, src_stride, dst, dst_stride, width);
+ src += 8 * src_stride; // Go down 8 rows.
+ dst += 8; // Move over 8 columns.
+ i -= 8;
+ }
+
+ if (i > 0) {
+ TransposeWxH_C(src, src_stride, dst, dst_stride, width, i);
+ }
+}
+
+LIBYUV_API
+void RotatePlane90(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height) {
+ // Rotate by 90 is a transpose with the source read
+ // from bottom to top. So set the source pointer to the end
+ // of the buffer and flip the sign of the source stride.
+ src += src_stride * (height - 1);
+ src_stride = -src_stride;
+ TransposePlane(src, src_stride, dst, dst_stride, width, height);
+}
+
+LIBYUV_API
+void RotatePlane270(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height) {
+ // Rotate by 270 is a transpose with the destination written
+ // from bottom to top. So set the destination pointer to the end
+ // of the buffer and flip the sign of the destination stride.
+ dst += dst_stride * (width - 1);
+ dst_stride = -dst_stride;
+ TransposePlane(src, src_stride, dst, dst_stride, width, height);
+}
+
+LIBYUV_API
+void RotatePlane180(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height) {
+ // Swap first and last row and mirror the content. Uses a temporary row.
+ align_buffer_64(row, width);
+ const uint8* src_bot = src + src_stride * (height - 1);
+ uint8* dst_bot = dst + dst_stride * (height - 1);
+ int half_height = (height + 1) >> 1;
+ int y;
+ void (*MirrorRow)(const uint8* src, uint8* dst, int width) = MirrorRow_C;
+ void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
+#if defined(HAS_MIRRORROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ MirrorRow = MirrorRow_Any_NEON;
+ if (IS_ALIGNED(width, 16)) {
+ MirrorRow = MirrorRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_MIRRORROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ MirrorRow = MirrorRow_Any_SSE2;
+ if (IS_ALIGNED(width, 16)) {
+ MirrorRow = MirrorRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_MIRRORROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ MirrorRow = MirrorRow_Any_SSSE3;
+ if (IS_ALIGNED(width, 16)) {
+ MirrorRow = MirrorRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_MIRRORROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ MirrorRow = MirrorRow_Any_AVX2;
+ if (IS_ALIGNED(width, 32)) {
+ MirrorRow = MirrorRow_AVX2;
+ }
+ }
+#endif
+// TODO(fbarchard): Make MirrorRow on MIPS handle unaligned memory.
+#if defined(HAS_MIRRORROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst, 4) && IS_ALIGNED(dst_stride, 4)) {
+ MirrorRow = MirrorRow_MIPS_DSPR2;
+ }
+#endif
+#if defined(HAS_COPYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ CopyRow = IS_ALIGNED(width, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
+ }
+#endif
+#if defined(HAS_COPYROW_AVX)
+ if (TestCpuFlag(kCpuHasAVX)) {
+ CopyRow = IS_ALIGNED(width, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
+ }
+#endif
+#if defined(HAS_COPYROW_ERMS)
+ if (TestCpuFlag(kCpuHasERMS)) {
+ CopyRow = CopyRow_ERMS;
+ }
+#endif
+#if defined(HAS_COPYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ CopyRow = IS_ALIGNED(width, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
+ }
+#endif
+#if defined(HAS_COPYROW_MIPS)
+ if (TestCpuFlag(kCpuHasMIPS)) {
+ CopyRow = CopyRow_MIPS;
+ }
+#endif
+
+ // Odd height will harmlessly mirror the middle row twice.
+ for (y = 0; y < half_height; ++y) {
+ MirrorRow(src, row, width); // Mirror first row into a buffer
+ src += src_stride;
+ MirrorRow(src_bot, dst, width); // Mirror last row into first row
+ dst += dst_stride;
+ CopyRow(row, dst_bot, width); // Copy first mirrored row into last
+ src_bot -= src_stride;
+ dst_bot -= dst_stride;
+ }
+ free_aligned_buffer_64(row);
+}
+
+LIBYUV_API
+void TransposeUV(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height) {
+ int i = height;
+ void (*TransposeUVWx8)(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width) = TransposeUVWx8_C;
+#if defined(HAS_TRANSPOSEUVWX8_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ TransposeUVWx8 = TransposeUVWx8_NEON;
+ }
+#endif
+#if defined(HAS_TRANSPOSEUVWX8_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(width, 8)) {
+ TransposeUVWx8 = TransposeUVWx8_SSE2;
+ }
+#endif
+#if defined(HAS_TRANSPOSEUVWx8_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(width, 2) &&
+ IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
+ TransposeUVWx8 = TransposeUVWx8_MIPS_DSPR2;
+ }
+#endif
+
+ // Work through the source in 8x8 tiles.
+ while (i >= 8) {
+ TransposeUVWx8(src, src_stride,
+ dst_a, dst_stride_a,
+ dst_b, dst_stride_b,
+ width);
+ src += 8 * src_stride; // Go down 8 rows.
+ dst_a += 8; // Move over 8 columns.
+ dst_b += 8; // Move over 8 columns.
+ i -= 8;
+ }
+
+ if (i > 0) {
+ TransposeUVWxH_C(src, src_stride,
+ dst_a, dst_stride_a,
+ dst_b, dst_stride_b,
+ width, i);
+ }
+}
+
+LIBYUV_API
+void RotateUV90(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height) {
+ src += src_stride * (height - 1);
+ src_stride = -src_stride;
+
+ TransposeUV(src, src_stride,
+ dst_a, dst_stride_a,
+ dst_b, dst_stride_b,
+ width, height);
+}
+
+LIBYUV_API
+void RotateUV270(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height) {
+ dst_a += dst_stride_a * (width - 1);
+ dst_b += dst_stride_b * (width - 1);
+ dst_stride_a = -dst_stride_a;
+ dst_stride_b = -dst_stride_b;
+
+ TransposeUV(src, src_stride,
+ dst_a, dst_stride_a,
+ dst_b, dst_stride_b,
+ width, height);
+}
+
+// Rotate 180 is a horizontal and vertical flip.
+LIBYUV_API
+void RotateUV180(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height) {
+ int i;
+ void (*MirrorRowUV)(const uint8* src, uint8* dst_u, uint8* dst_v, int width) =
+ MirrorUVRow_C;
+#if defined(HAS_MIRRORUVROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(width, 8)) {
+ MirrorRowUV = MirrorUVRow_NEON;
+ }
+#endif
+#if defined(HAS_MIRRORROW_UV_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && IS_ALIGNED(width, 16)) {
+ MirrorRowUV = MirrorUVRow_SSSE3;
+ }
+#endif
+#if defined(HAS_MIRRORUVROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(src, 4) && IS_ALIGNED(src_stride, 4)) {
+ MirrorRowUV = MirrorUVRow_MIPS_DSPR2;
+ }
+#endif
+
+ dst_a += dst_stride_a * (height - 1);
+ dst_b += dst_stride_b * (height - 1);
+
+ for (i = 0; i < height; ++i) {
+ MirrorRowUV(src, dst_a, dst_b, width);
+ src += src_stride;
+ dst_a -= dst_stride_a;
+ dst_b -= dst_stride_b;
+ }
+}
+
+LIBYUV_API
+int RotatePlane(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height,
+ enum RotationMode mode) {
+ if (!src || width <= 0 || height == 0 || !dst) {
+ return -1;
+ }
+
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src = src + (height - 1) * src_stride;
+ src_stride = -src_stride;
+ }
+
+ switch (mode) {
+ case kRotate0:
+ // copy frame
+ CopyPlane(src, src_stride,
+ dst, dst_stride,
+ width, height);
+ return 0;
+ case kRotate90:
+ RotatePlane90(src, src_stride,
+ dst, dst_stride,
+ width, height);
+ return 0;
+ case kRotate270:
+ RotatePlane270(src, src_stride,
+ dst, dst_stride,
+ width, height);
+ return 0;
+ case kRotate180:
+ RotatePlane180(src, src_stride,
+ dst, dst_stride,
+ width, height);
+ return 0;
+ default:
+ break;
+ }
+ return -1;
+}
+
+LIBYUV_API
+int I420Rotate(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height,
+ enum RotationMode mode) {
+ int halfwidth = (width + 1) >> 1;
+ int halfheight = (height + 1) >> 1;
+ if (!src_y || !src_u || !src_v || width <= 0 || height == 0 ||
+ !dst_y || !dst_u || !dst_v) {
+ return -1;
+ }
+
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ halfheight = (height + 1) >> 1;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_u = src_u + (halfheight - 1) * src_stride_u;
+ src_v = src_v + (halfheight - 1) * src_stride_v;
+ src_stride_y = -src_stride_y;
+ src_stride_u = -src_stride_u;
+ src_stride_v = -src_stride_v;
+ }
+
+ switch (mode) {
+ case kRotate0:
+ // copy frame
+ return I420Copy(src_y, src_stride_y,
+ src_u, src_stride_u,
+ src_v, src_stride_v,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height);
+ case kRotate90:
+ RotatePlane90(src_y, src_stride_y,
+ dst_y, dst_stride_y,
+ width, height);
+ RotatePlane90(src_u, src_stride_u,
+ dst_u, dst_stride_u,
+ halfwidth, halfheight);
+ RotatePlane90(src_v, src_stride_v,
+ dst_v, dst_stride_v,
+ halfwidth, halfheight);
+ return 0;
+ case kRotate270:
+ RotatePlane270(src_y, src_stride_y,
+ dst_y, dst_stride_y,
+ width, height);
+ RotatePlane270(src_u, src_stride_u,
+ dst_u, dst_stride_u,
+ halfwidth, halfheight);
+ RotatePlane270(src_v, src_stride_v,
+ dst_v, dst_stride_v,
+ halfwidth, halfheight);
+ return 0;
+ case kRotate180:
+ RotatePlane180(src_y, src_stride_y,
+ dst_y, dst_stride_y,
+ width, height);
+ RotatePlane180(src_u, src_stride_u,
+ dst_u, dst_stride_u,
+ halfwidth, halfheight);
+ RotatePlane180(src_v, src_stride_v,
+ dst_v, dst_stride_v,
+ halfwidth, halfheight);
+ return 0;
+ default:
+ break;
+ }
+ return -1;
+}
+
+LIBYUV_API
+int NV12ToI420Rotate(const uint8* src_y, int src_stride_y,
+ const uint8* src_uv, int src_stride_uv,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int width, int height,
+ enum RotationMode mode) {
+ int halfwidth = (width + 1) >> 1;
+ int halfheight = (height + 1) >> 1;
+ if (!src_y || !src_uv || width <= 0 || height == 0 ||
+ !dst_y || !dst_u || !dst_v) {
+ return -1;
+ }
+
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ halfheight = (height + 1) >> 1;
+ src_y = src_y + (height - 1) * src_stride_y;
+ src_uv = src_uv + (halfheight - 1) * src_stride_uv;
+ src_stride_y = -src_stride_y;
+ src_stride_uv = -src_stride_uv;
+ }
+
+ switch (mode) {
+ case kRotate0:
+ // copy frame
+ return NV12ToI420(src_y, src_stride_y,
+ src_uv, src_stride_uv,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ width, height);
+ case kRotate90:
+ RotatePlane90(src_y, src_stride_y,
+ dst_y, dst_stride_y,
+ width, height);
+ RotateUV90(src_uv, src_stride_uv,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ halfwidth, halfheight);
+ return 0;
+ case kRotate270:
+ RotatePlane270(src_y, src_stride_y,
+ dst_y, dst_stride_y,
+ width, height);
+ RotateUV270(src_uv, src_stride_uv,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ halfwidth, halfheight);
+ return 0;
+ case kRotate180:
+ RotatePlane180(src_y, src_stride_y,
+ dst_y, dst_stride_y,
+ width, height);
+ RotateUV180(src_uv, src_stride_uv,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ halfwidth, halfheight);
+ return 0;
+ default:
+ break;
+ }
+ return -1;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/rotate_any.cc b/media/libaom/src/third_party/libyuv/source/rotate_any.cc
new file mode 100644
index 000000000..4d6eb34e1
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate_any.cc
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/rotate.h"
+#include "libyuv/rotate_row.h"
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
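+// TANY generates an "Any" wrapper around a SIMD transpose: the SIMD kernel
+// handles the largest multiple of (MASK + 1) columns and the C fallback
+// transposes the remaining columns into the trailing dst rows.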
+#define TANY(NAMEANY, TPOS_SIMD, TPOS_C, MASK) \
+ void NAMEANY(const uint8* src, int src_stride, \
+ uint8* dst, int dst_stride, int width) { \
+ int r = width & MASK; \
+ int n = width - r; \
+ if (n > 0) { \
+ TPOS_SIMD(src, src_stride, dst, dst_stride, n); \
+ } \
+ TPOS_C(src + n, src_stride, dst + n * dst_stride, dst_stride, r); \
+ }
+
+#ifdef HAS_TRANSPOSEWX8_NEON
+TANY(TransposeWx8_Any_NEON, TransposeWx8_NEON, TransposeWx8_C, 7)
+#endif
+#ifdef HAS_TRANSPOSEWX8_SSSE3
+TANY(TransposeWx8_Any_SSSE3, TransposeWx8_SSSE3, TransposeWx8_C, 7)
+#endif
+#ifdef HAS_TRANSPOSEWX8_FAST_SSSE3
+TANY(TransposeWx8_Fast_Any_SSSE3, TransposeWx8_Fast_SSSE3, TransposeWx8_C, 15)
+#endif
+#ifdef HAS_TRANSPOSEWX8_MIPS_DSPR2
+TANY(TransposeWx8_Any_MIPS_DSPR2, TransposeWx8_MIPS_DSPR2, TransposeWx8_C, 7)
+#endif
+
+#undef TANY
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+
+
+
+
diff --git a/media/libaom/src/third_party/libyuv/source/rotate_argb.cc b/media/libaom/src/third_party/libyuv/source/rotate_argb.cc
new file mode 100644
index 000000000..787c0ad1b
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate_argb.cc
@@ -0,0 +1,205 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/rotate.h"
+
+#include "libyuv/cpu_id.h"
+#include "libyuv/convert.h"
+#include "libyuv/planar_functions.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// ARGBScale has a function to copy pixels to a row, striding each source
+// pixel by a constant.
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(_M_IX86) || \
+ (defined(__x86_64__) && !defined(__native_client__)) || defined(__i386__))
+#define HAS_SCALEARGBROWDOWNEVEN_SSE2
+void ScaleARGBRowDownEven_SSE2(const uint8* src_ptr, int src_stride,
+ int src_stepx, uint8* dst_ptr, int dst_width);
+#endif
+#if !defined(LIBYUV_DISABLE_NEON) && !defined(__native_client__) && \
+ (defined(__ARM_NEON__) || defined(LIBYUV_NEON) || defined(__aarch64__))
+#define HAS_SCALEARGBROWDOWNEVEN_NEON
+void ScaleARGBRowDownEven_NEON(const uint8* src_ptr, int src_stride,
+ int src_stepx, uint8* dst_ptr, int dst_width);
+#endif
+
+void ScaleARGBRowDownEven_C(const uint8* src_ptr, int,
+ int src_stepx, uint8* dst_ptr, int dst_width);
+
+static void ARGBTranspose(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width, int height) {
+ int i;
+ int src_pixel_step = src_stride >> 2;
+ void (*ScaleARGBRowDownEven)(const uint8* src_ptr, int src_stride,
+ int src_step, uint8* dst_ptr, int dst_width) = ScaleARGBRowDownEven_C;
+#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(height, 4)) { // Width of dest.
+ ScaleARGBRowDownEven = ScaleARGBRowDownEven_SSE2;
+ }
+#endif
+#if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(height, 4)) { // Width of dest.
+ ScaleARGBRowDownEven = ScaleARGBRowDownEven_NEON;
+ }
+#endif
+
+ for (i = 0; i < width; ++i) { // column of source to row of dest.
+ ScaleARGBRowDownEven(src, 0, src_pixel_step, dst, height);
+ dst += dst_stride;
+ src += 4;
+ }
+}
+
+void ARGBRotate90(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width, int height) {
+  // Rotate by 90 is an ARGBTranspose with the source read
+ // from bottom to top. So set the source pointer to the end
+ // of the buffer and flip the sign of the source stride.
+ src += src_stride * (height - 1);
+ src_stride = -src_stride;
+ ARGBTranspose(src, src_stride, dst, dst_stride, width, height);
+}
+
+void ARGBRotate270(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width, int height) {
+  // Rotate by 270 is an ARGBTranspose with the destination written
+ // from bottom to top. So set the destination pointer to the end
+ // of the buffer and flip the sign of the destination stride.
+ dst += dst_stride * (width - 1);
+ dst_stride = -dst_stride;
+ ARGBTranspose(src, src_stride, dst, dst_stride, width, height);
+}
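+
+// A 2x2 worked example of the two identities (each letter is one ARGB
+// pixel):
+//
+//   src = | a b |   Rotate90(src) = | c a |   Rotate270(src) = | b d |
+//         | c d |                   | d b |                    | a c |
+//
+// Rotate90 is a transpose of the vertically flipped source; Rotate270 is
+// a transpose followed by a vertical flip of the destination.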
+
+void ARGBRotate180(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width, int height) {
+  // Swap the first and last rows and mirror the content. Uses a temporary row.
+ align_buffer_64(row, width * 4);
+ const uint8* src_bot = src + src_stride * (height - 1);
+ uint8* dst_bot = dst + dst_stride * (height - 1);
+ int half_height = (height + 1) >> 1;
+ int y;
+ void (*ARGBMirrorRow)(const uint8* src, uint8* dst, int width) =
+ ARGBMirrorRow_C;
+ void (*CopyRow)(const uint8* src, uint8* dst, int width) = CopyRow_C;
+#if defined(HAS_ARGBMIRRORROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ARGBMirrorRow = ARGBMirrorRow_Any_NEON;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBMirrorRow = ARGBMirrorRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_ARGBMIRRORROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ARGBMirrorRow = ARGBMirrorRow_Any_SSE2;
+ if (IS_ALIGNED(width, 4)) {
+ ARGBMirrorRow = ARGBMirrorRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_ARGBMIRRORROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ARGBMirrorRow = ARGBMirrorRow_Any_AVX2;
+ if (IS_ALIGNED(width, 8)) {
+ ARGBMirrorRow = ARGBMirrorRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_COPYROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_SSE2 : CopyRow_Any_SSE2;
+ }
+#endif
+#if defined(HAS_COPYROW_AVX)
+ if (TestCpuFlag(kCpuHasAVX)) {
+ CopyRow = IS_ALIGNED(width * 4, 64) ? CopyRow_AVX : CopyRow_Any_AVX;
+ }
+#endif
+#if defined(HAS_COPYROW_ERMS)
+ if (TestCpuFlag(kCpuHasERMS)) {
+ CopyRow = CopyRow_ERMS;
+ }
+#endif
+#if defined(HAS_COPYROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ CopyRow = IS_ALIGNED(width * 4, 32) ? CopyRow_NEON : CopyRow_Any_NEON;
+ }
+#endif
+#if defined(HAS_COPYROW_MIPS)
+ if (TestCpuFlag(kCpuHasMIPS)) {
+ CopyRow = CopyRow_MIPS;
+ }
+#endif
+
+ // Odd height will harmlessly mirror the middle row twice.
+ for (y = 0; y < half_height; ++y) {
+ ARGBMirrorRow(src, row, width); // Mirror first row into a buffer
+ ARGBMirrorRow(src_bot, dst, width); // Mirror last row into first row
+ CopyRow(row, dst_bot, width * 4); // Copy first mirrored row into last
+ src += src_stride;
+ dst += dst_stride;
+ src_bot -= src_stride;
+ dst_bot -= dst_stride;
+ }
+ free_aligned_buffer_64(row);
+}
+
+LIBYUV_API
+int ARGBRotate(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_argb, int dst_stride_argb, int width, int height,
+ enum RotationMode mode) {
+ if (!src_argb || width <= 0 || height == 0 || !dst_argb) {
+ return -1;
+ }
+
+ // Negative height means invert the image.
+ if (height < 0) {
+ height = -height;
+ src_argb = src_argb + (height - 1) * src_stride_argb;
+ src_stride_argb = -src_stride_argb;
+ }
+
+ switch (mode) {
+ case kRotate0:
+ // copy frame
+ return ARGBCopy(src_argb, src_stride_argb,
+ dst_argb, dst_stride_argb,
+ width, height);
+ case kRotate90:
+ ARGBRotate90(src_argb, src_stride_argb,
+ dst_argb, dst_stride_argb,
+ width, height);
+ return 0;
+ case kRotate270:
+ ARGBRotate270(src_argb, src_stride_argb,
+ dst_argb, dst_stride_argb,
+ width, height);
+ return 0;
+ case kRotate180:
+ ARGBRotate180(src_argb, src_stride_argb,
+ dst_argb, dst_stride_argb,
+ width, height);
+ return 0;
+ default:
+ break;
+ }
+ return -1;
+}
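+
+// A minimal usage sketch (buffer names and sizes are hypothetical): for a
+// 90 or 270 degree rotation the destination dimensions are swapped, so a
+// width x height source needs a height x width destination:
+//
+//   uint8 src[640 * 480 * 4];  // 640x480 ARGB source
+//   uint8 dst[480 * 640 * 4];  // 480x640 ARGB destination
+//   ARGBRotate(src, 640 * 4, dst, 480 * 4, 640, 480, kRotate90);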
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/rotate_common.cc b/media/libaom/src/third_party/libyuv/source/rotate_common.cc
new file mode 100644
index 000000000..b33a9a0c6
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate_common.cc
@@ -0,0 +1,92 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+#include "libyuv/rotate_row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+void TransposeWx8_C(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ dst[0] = src[0 * src_stride];
+ dst[1] = src[1 * src_stride];
+ dst[2] = src[2 * src_stride];
+ dst[3] = src[3 * src_stride];
+ dst[4] = src[4 * src_stride];
+ dst[5] = src[5 * src_stride];
+ dst[6] = src[6 * src_stride];
+ dst[7] = src[7 * src_stride];
+ ++src;
+ dst += dst_stride;
+ }
+}
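+
+// In index form the loop above is dst[i][j] = src[j][i] for j in 0..7,
+// i.e. TransposeWxH_C below specialized to a fixed height of 8.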
+
+void TransposeUVWx8_C(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ dst_a[0] = src[0 * src_stride + 0];
+ dst_b[0] = src[0 * src_stride + 1];
+ dst_a[1] = src[1 * src_stride + 0];
+ dst_b[1] = src[1 * src_stride + 1];
+ dst_a[2] = src[2 * src_stride + 0];
+ dst_b[2] = src[2 * src_stride + 1];
+ dst_a[3] = src[3 * src_stride + 0];
+ dst_b[3] = src[3 * src_stride + 1];
+ dst_a[4] = src[4 * src_stride + 0];
+ dst_b[4] = src[4 * src_stride + 1];
+ dst_a[5] = src[5 * src_stride + 0];
+ dst_b[5] = src[5 * src_stride + 1];
+ dst_a[6] = src[6 * src_stride + 0];
+ dst_b[6] = src[6 * src_stride + 1];
+ dst_a[7] = src[7 * src_stride + 0];
+ dst_b[7] = src[7 * src_stride + 1];
+ src += 2;
+ dst_a += dst_stride_a;
+ dst_b += dst_stride_b;
+ }
+}
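+
+// In index form: dst_a[i][j] = src[j][2*i] and dst_b[i][j] = src[j][2*i+1],
+// splitting the interleaved UV plane into two de-interleaved, transposed
+// planes in one pass (TransposeUVWxH_C below with height fixed at 8).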
+
+void TransposeWxH_C(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width, int height) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ int j;
+ for (j = 0; j < height; ++j) {
+ dst[i * dst_stride + j] = src[j * src_stride + i];
+ }
+ }
+}
+
+void TransposeUVWxH_C(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width, int height) {
+ int i;
+ for (i = 0; i < width * 2; i += 2) {
+ int j;
+ for (j = 0; j < height; ++j) {
+ dst_a[j + ((i >> 1) * dst_stride_a)] = src[i + (j * src_stride)];
+ dst_b[j + ((i >> 1) * dst_stride_b)] = src[i + (j * src_stride) + 1];
+ }
+ }
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/rotate_gcc.cc b/media/libaom/src/third_party/libyuv/source/rotate_gcc.cc
new file mode 100644
index 000000000..fd385bcd3
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate_gcc.cc
@@ -0,0 +1,493 @@
+/*
+ * Copyright 2015 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+#include "libyuv/rotate_row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC x86 and x64.
+#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))
+
+#if !defined(LIBYUV_DISABLE_X86) && \
+ (defined(__i386__) || (defined(__x86_64__) && !defined(__native_client__)))
+void TransposeWx8_SSSE3(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width) {
+ asm volatile (
+ // Read in the data from the source pointer.
+ // First round of bit swap.
+ ".p2align 2 \n"
+ "1: \n"
+ "movq (%0),%%xmm0 \n"
+ "movq (%0,%3),%%xmm1 \n"
+ "lea (%0,%3,2),%0 \n"
+ "punpcklbw %%xmm1,%%xmm0 \n"
+ "movq (%0),%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "palignr $0x8,%%xmm1,%%xmm1 \n"
+ "movq (%0,%3),%%xmm3 \n"
+ "lea (%0,%3,2),%0 \n"
+ "punpcklbw %%xmm3,%%xmm2 \n"
+ "movdqa %%xmm2,%%xmm3 \n"
+ "movq (%0),%%xmm4 \n"
+ "palignr $0x8,%%xmm3,%%xmm3 \n"
+ "movq (%0,%3),%%xmm5 \n"
+ "lea (%0,%3,2),%0 \n"
+ "punpcklbw %%xmm5,%%xmm4 \n"
+ "movdqa %%xmm4,%%xmm5 \n"
+ "movq (%0),%%xmm6 \n"
+ "palignr $0x8,%%xmm5,%%xmm5 \n"
+ "movq (%0,%3),%%xmm7 \n"
+ "lea (%0,%3,2),%0 \n"
+ "punpcklbw %%xmm7,%%xmm6 \n"
+ "neg %3 \n"
+ "movdqa %%xmm6,%%xmm7 \n"
+ "lea 0x8(%0,%3,8),%0 \n"
+ "palignr $0x8,%%xmm7,%%xmm7 \n"
+ "neg %3 \n"
+ // Second round of bit swap.
+ "punpcklwd %%xmm2,%%xmm0 \n"
+ "punpcklwd %%xmm3,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "movdqa %%xmm1,%%xmm3 \n"
+ "palignr $0x8,%%xmm2,%%xmm2 \n"
+ "palignr $0x8,%%xmm3,%%xmm3 \n"
+ "punpcklwd %%xmm6,%%xmm4 \n"
+ "punpcklwd %%xmm7,%%xmm5 \n"
+ "movdqa %%xmm4,%%xmm6 \n"
+ "movdqa %%xmm5,%%xmm7 \n"
+ "palignr $0x8,%%xmm6,%%xmm6 \n"
+ "palignr $0x8,%%xmm7,%%xmm7 \n"
+ // Third round of bit swap.
+ // Write to the destination pointer.
+ "punpckldq %%xmm4,%%xmm0 \n"
+ "movq %%xmm0,(%1) \n"
+ "movdqa %%xmm0,%%xmm4 \n"
+ "palignr $0x8,%%xmm4,%%xmm4 \n"
+ "movq %%xmm4,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "punpckldq %%xmm6,%%xmm2 \n"
+ "movdqa %%xmm2,%%xmm6 \n"
+ "movq %%xmm2,(%1) \n"
+ "palignr $0x8,%%xmm6,%%xmm6 \n"
+ "punpckldq %%xmm5,%%xmm1 \n"
+ "movq %%xmm6,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "movdqa %%xmm1,%%xmm5 \n"
+ "movq %%xmm1,(%1) \n"
+ "palignr $0x8,%%xmm5,%%xmm5 \n"
+ "movq %%xmm5,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "punpckldq %%xmm7,%%xmm3 \n"
+ "movq %%xmm3,(%1) \n"
+ "movdqa %%xmm3,%%xmm7 \n"
+ "palignr $0x8,%%xmm7,%%xmm7 \n"
+ "sub $0x8,%2 \n"
+ "movq %%xmm7,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ : "r"((intptr_t)(src_stride)), // %3
+ "r"((intptr_t)(dst_stride)) // %4
+ : "memory", "cc",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
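+
+// A note on the scheme above: the 8x8 byte transpose takes log2(8) = 3
+// merge rounds. punpcklbw interleaves the bytes of row pairs, punpcklwd
+// the resulting 16-bit pairs, and punpckldq the 32-bit quads, after which
+// each 64-bit register half holds one transposed output row.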
+
+#if !defined(LIBYUV_DISABLE_X86) && defined(__i386__) && !defined(__clang__)
+void TransposeUVWx8_SSE2(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width);
+ asm (
+ DECLARE_FUNCTION(TransposeUVWx8_SSE2)
+ "push %ebx \n"
+ "push %esi \n"
+ "push %edi \n"
+ "push %ebp \n"
+ "mov 0x14(%esp),%eax \n"
+ "mov 0x18(%esp),%edi \n"
+ "mov 0x1c(%esp),%edx \n"
+ "mov 0x20(%esp),%esi \n"
+ "mov 0x24(%esp),%ebx \n"
+ "mov 0x28(%esp),%ebp \n"
+ "mov %esp,%ecx \n"
+ "sub $0x14,%esp \n"
+ "and $0xfffffff0,%esp \n"
+ "mov %ecx,0x10(%esp) \n"
+ "mov 0x2c(%ecx),%ecx \n"
+
+"1: \n"
+ "movdqu (%eax),%xmm0 \n"
+ "movdqu (%eax,%edi,1),%xmm1 \n"
+ "lea (%eax,%edi,2),%eax \n"
+ "movdqa %xmm0,%xmm7 \n"
+ "punpcklbw %xmm1,%xmm0 \n"
+ "punpckhbw %xmm1,%xmm7 \n"
+ "movdqa %xmm7,%xmm1 \n"
+ "movdqu (%eax),%xmm2 \n"
+ "movdqu (%eax,%edi,1),%xmm3 \n"
+ "lea (%eax,%edi,2),%eax \n"
+ "movdqa %xmm2,%xmm7 \n"
+ "punpcklbw %xmm3,%xmm2 \n"
+ "punpckhbw %xmm3,%xmm7 \n"
+ "movdqa %xmm7,%xmm3 \n"
+ "movdqu (%eax),%xmm4 \n"
+ "movdqu (%eax,%edi,1),%xmm5 \n"
+ "lea (%eax,%edi,2),%eax \n"
+ "movdqa %xmm4,%xmm7 \n"
+ "punpcklbw %xmm5,%xmm4 \n"
+ "punpckhbw %xmm5,%xmm7 \n"
+ "movdqa %xmm7,%xmm5 \n"
+ "movdqu (%eax),%xmm6 \n"
+ "movdqu (%eax,%edi,1),%xmm7 \n"
+ "lea (%eax,%edi,2),%eax \n"
+ "movdqu %xmm5,(%esp) \n"
+ "neg %edi \n"
+ "movdqa %xmm6,%xmm5 \n"
+ "punpcklbw %xmm7,%xmm6 \n"
+ "punpckhbw %xmm7,%xmm5 \n"
+ "movdqa %xmm5,%xmm7 \n"
+ "lea 0x10(%eax,%edi,8),%eax \n"
+ "neg %edi \n"
+ "movdqa %xmm0,%xmm5 \n"
+ "punpcklwd %xmm2,%xmm0 \n"
+ "punpckhwd %xmm2,%xmm5 \n"
+ "movdqa %xmm5,%xmm2 \n"
+ "movdqa %xmm1,%xmm5 \n"
+ "punpcklwd %xmm3,%xmm1 \n"
+ "punpckhwd %xmm3,%xmm5 \n"
+ "movdqa %xmm5,%xmm3 \n"
+ "movdqa %xmm4,%xmm5 \n"
+ "punpcklwd %xmm6,%xmm4 \n"
+ "punpckhwd %xmm6,%xmm5 \n"
+ "movdqa %xmm5,%xmm6 \n"
+ "movdqu (%esp),%xmm5 \n"
+ "movdqu %xmm6,(%esp) \n"
+ "movdqa %xmm5,%xmm6 \n"
+ "punpcklwd %xmm7,%xmm5 \n"
+ "punpckhwd %xmm7,%xmm6 \n"
+ "movdqa %xmm6,%xmm7 \n"
+ "movdqa %xmm0,%xmm6 \n"
+ "punpckldq %xmm4,%xmm0 \n"
+ "punpckhdq %xmm4,%xmm6 \n"
+ "movdqa %xmm6,%xmm4 \n"
+ "movdqu (%esp),%xmm6 \n"
+ "movlpd %xmm0,(%edx) \n"
+ "movhpd %xmm0,(%ebx) \n"
+ "movlpd %xmm4,(%edx,%esi,1) \n"
+ "lea (%edx,%esi,2),%edx \n"
+ "movhpd %xmm4,(%ebx,%ebp,1) \n"
+ "lea (%ebx,%ebp,2),%ebx \n"
+ "movdqa %xmm2,%xmm0 \n"
+ "punpckldq %xmm6,%xmm2 \n"
+ "movlpd %xmm2,(%edx) \n"
+ "movhpd %xmm2,(%ebx) \n"
+ "punpckhdq %xmm6,%xmm0 \n"
+ "movlpd %xmm0,(%edx,%esi,1) \n"
+ "lea (%edx,%esi,2),%edx \n"
+ "movhpd %xmm0,(%ebx,%ebp,1) \n"
+ "lea (%ebx,%ebp,2),%ebx \n"
+ "movdqa %xmm1,%xmm0 \n"
+ "punpckldq %xmm5,%xmm1 \n"
+ "movlpd %xmm1,(%edx) \n"
+ "movhpd %xmm1,(%ebx) \n"
+ "punpckhdq %xmm5,%xmm0 \n"
+ "movlpd %xmm0,(%edx,%esi,1) \n"
+ "lea (%edx,%esi,2),%edx \n"
+ "movhpd %xmm0,(%ebx,%ebp,1) \n"
+ "lea (%ebx,%ebp,2),%ebx \n"
+ "movdqa %xmm3,%xmm0 \n"
+ "punpckldq %xmm7,%xmm3 \n"
+ "movlpd %xmm3,(%edx) \n"
+ "movhpd %xmm3,(%ebx) \n"
+ "punpckhdq %xmm7,%xmm0 \n"
+ "sub $0x8,%ecx \n"
+ "movlpd %xmm0,(%edx,%esi,1) \n"
+ "lea (%edx,%esi,2),%edx \n"
+ "movhpd %xmm0,(%ebx,%ebp,1) \n"
+ "lea (%ebx,%ebp,2),%ebx \n"
+ "jg 1b \n"
+ "mov 0x10(%esp),%esp \n"
+ "pop %ebp \n"
+ "pop %edi \n"
+ "pop %esi \n"
+ "pop %ebx \n"
+#if defined(__native_client__)
+ "pop %ecx \n"
+ "and $0xffffffe0,%ecx \n"
+ "jmp *%ecx \n"
+#else
+ "ret \n"
+#endif
+);
+#endif
+#if !defined(LIBYUV_DISABLE_X86) && !defined(__native_client__) && \
+ defined(__x86_64__)
+// The 64-bit version has enough registers to do 16x8 to 8x16 at a time.
+void TransposeWx8_Fast_SSSE3(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width) {
+ asm volatile (
+ // Read in the data from the source pointer.
+ // First round of bit swap.
+ ".p2align 2 \n"
+"1: \n"
+ "movdqu (%0),%%xmm0 \n"
+ "movdqu (%0,%3),%%xmm1 \n"
+ "lea (%0,%3,2),%0 \n"
+ "movdqa %%xmm0,%%xmm8 \n"
+ "punpcklbw %%xmm1,%%xmm0 \n"
+ "punpckhbw %%xmm1,%%xmm8 \n"
+ "movdqu (%0),%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm8,%%xmm9 \n"
+ "palignr $0x8,%%xmm1,%%xmm1 \n"
+ "palignr $0x8,%%xmm9,%%xmm9 \n"
+ "movdqu (%0,%3),%%xmm3 \n"
+ "lea (%0,%3,2),%0 \n"
+ "movdqa %%xmm2,%%xmm10 \n"
+ "punpcklbw %%xmm3,%%xmm2 \n"
+ "punpckhbw %%xmm3,%%xmm10 \n"
+ "movdqa %%xmm2,%%xmm3 \n"
+ "movdqa %%xmm10,%%xmm11 \n"
+ "movdqu (%0),%%xmm4 \n"
+ "palignr $0x8,%%xmm3,%%xmm3 \n"
+ "palignr $0x8,%%xmm11,%%xmm11 \n"
+ "movdqu (%0,%3),%%xmm5 \n"
+ "lea (%0,%3,2),%0 \n"
+ "movdqa %%xmm4,%%xmm12 \n"
+ "punpcklbw %%xmm5,%%xmm4 \n"
+ "punpckhbw %%xmm5,%%xmm12 \n"
+ "movdqa %%xmm4,%%xmm5 \n"
+ "movdqa %%xmm12,%%xmm13 \n"
+ "movdqu (%0),%%xmm6 \n"
+ "palignr $0x8,%%xmm5,%%xmm5 \n"
+ "palignr $0x8,%%xmm13,%%xmm13 \n"
+ "movdqu (%0,%3),%%xmm7 \n"
+ "lea (%0,%3,2),%0 \n"
+ "movdqa %%xmm6,%%xmm14 \n"
+ "punpcklbw %%xmm7,%%xmm6 \n"
+ "punpckhbw %%xmm7,%%xmm14 \n"
+ "neg %3 \n"
+ "movdqa %%xmm6,%%xmm7 \n"
+ "movdqa %%xmm14,%%xmm15 \n"
+ "lea 0x10(%0,%3,8),%0 \n"
+ "palignr $0x8,%%xmm7,%%xmm7 \n"
+ "palignr $0x8,%%xmm15,%%xmm15 \n"
+ "neg %3 \n"
+ // Second round of bit swap.
+ "punpcklwd %%xmm2,%%xmm0 \n"
+ "punpcklwd %%xmm3,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "movdqa %%xmm1,%%xmm3 \n"
+ "palignr $0x8,%%xmm2,%%xmm2 \n"
+ "palignr $0x8,%%xmm3,%%xmm3 \n"
+ "punpcklwd %%xmm6,%%xmm4 \n"
+ "punpcklwd %%xmm7,%%xmm5 \n"
+ "movdqa %%xmm4,%%xmm6 \n"
+ "movdqa %%xmm5,%%xmm7 \n"
+ "palignr $0x8,%%xmm6,%%xmm6 \n"
+ "palignr $0x8,%%xmm7,%%xmm7 \n"
+ "punpcklwd %%xmm10,%%xmm8 \n"
+ "punpcklwd %%xmm11,%%xmm9 \n"
+ "movdqa %%xmm8,%%xmm10 \n"
+ "movdqa %%xmm9,%%xmm11 \n"
+ "palignr $0x8,%%xmm10,%%xmm10 \n"
+ "palignr $0x8,%%xmm11,%%xmm11 \n"
+ "punpcklwd %%xmm14,%%xmm12 \n"
+ "punpcklwd %%xmm15,%%xmm13 \n"
+ "movdqa %%xmm12,%%xmm14 \n"
+ "movdqa %%xmm13,%%xmm15 \n"
+ "palignr $0x8,%%xmm14,%%xmm14 \n"
+ "palignr $0x8,%%xmm15,%%xmm15 \n"
+ // Third round of bit swap.
+ // Write to the destination pointer.
+ "punpckldq %%xmm4,%%xmm0 \n"
+ "movq %%xmm0,(%1) \n"
+ "movdqa %%xmm0,%%xmm4 \n"
+ "palignr $0x8,%%xmm4,%%xmm4 \n"
+ "movq %%xmm4,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "punpckldq %%xmm6,%%xmm2 \n"
+ "movdqa %%xmm2,%%xmm6 \n"
+ "movq %%xmm2,(%1) \n"
+ "palignr $0x8,%%xmm6,%%xmm6 \n"
+ "punpckldq %%xmm5,%%xmm1 \n"
+ "movq %%xmm6,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "movdqa %%xmm1,%%xmm5 \n"
+ "movq %%xmm1,(%1) \n"
+ "palignr $0x8,%%xmm5,%%xmm5 \n"
+ "movq %%xmm5,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "punpckldq %%xmm7,%%xmm3 \n"
+ "movq %%xmm3,(%1) \n"
+ "movdqa %%xmm3,%%xmm7 \n"
+ "palignr $0x8,%%xmm7,%%xmm7 \n"
+ "movq %%xmm7,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "punpckldq %%xmm12,%%xmm8 \n"
+ "movq %%xmm8,(%1) \n"
+ "movdqa %%xmm8,%%xmm12 \n"
+ "palignr $0x8,%%xmm12,%%xmm12 \n"
+ "movq %%xmm12,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "punpckldq %%xmm14,%%xmm10 \n"
+ "movdqa %%xmm10,%%xmm14 \n"
+ "movq %%xmm10,(%1) \n"
+ "palignr $0x8,%%xmm14,%%xmm14 \n"
+ "punpckldq %%xmm13,%%xmm9 \n"
+ "movq %%xmm14,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "movdqa %%xmm9,%%xmm13 \n"
+ "movq %%xmm9,(%1) \n"
+ "palignr $0x8,%%xmm13,%%xmm13 \n"
+ "movq %%xmm13,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "punpckldq %%xmm15,%%xmm11 \n"
+ "movq %%xmm11,(%1) \n"
+ "movdqa %%xmm11,%%xmm15 \n"
+ "palignr $0x8,%%xmm15,%%xmm15 \n"
+ "sub $0x10,%2 \n"
+ "movq %%xmm15,(%1,%4) \n"
+ "lea (%1,%4,2),%1 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ : "r"((intptr_t)(src_stride)), // %3
+ "r"((intptr_t)(dst_stride)) // %4
+ : "memory", "cc",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"
+);
+}
+
+void TransposeUVWx8_SSE2(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b, int width) {
+ asm volatile (
+ // Read in the data from the source pointer.
+ // First round of bit swap.
+ ".p2align 2 \n"
+"1: \n"
+ "movdqu (%0),%%xmm0 \n"
+ "movdqu (%0,%4),%%xmm1 \n"
+ "lea (%0,%4,2),%0 \n"
+ "movdqa %%xmm0,%%xmm8 \n"
+ "punpcklbw %%xmm1,%%xmm0 \n"
+ "punpckhbw %%xmm1,%%xmm8 \n"
+ "movdqa %%xmm8,%%xmm1 \n"
+ "movdqu (%0),%%xmm2 \n"
+ "movdqu (%0,%4),%%xmm3 \n"
+ "lea (%0,%4,2),%0 \n"
+ "movdqa %%xmm2,%%xmm8 \n"
+ "punpcklbw %%xmm3,%%xmm2 \n"
+ "punpckhbw %%xmm3,%%xmm8 \n"
+ "movdqa %%xmm8,%%xmm3 \n"
+ "movdqu (%0),%%xmm4 \n"
+ "movdqu (%0,%4),%%xmm5 \n"
+ "lea (%0,%4,2),%0 \n"
+ "movdqa %%xmm4,%%xmm8 \n"
+ "punpcklbw %%xmm5,%%xmm4 \n"
+ "punpckhbw %%xmm5,%%xmm8 \n"
+ "movdqa %%xmm8,%%xmm5 \n"
+ "movdqu (%0),%%xmm6 \n"
+ "movdqu (%0,%4),%%xmm7 \n"
+ "lea (%0,%4,2),%0 \n"
+ "movdqa %%xmm6,%%xmm8 \n"
+ "punpcklbw %%xmm7,%%xmm6 \n"
+ "neg %4 \n"
+ "lea 0x10(%0,%4,8),%0 \n"
+ "punpckhbw %%xmm7,%%xmm8 \n"
+ "movdqa %%xmm8,%%xmm7 \n"
+ "neg %4 \n"
+ // Second round of bit swap.
+ "movdqa %%xmm0,%%xmm8 \n"
+ "movdqa %%xmm1,%%xmm9 \n"
+ "punpckhwd %%xmm2,%%xmm8 \n"
+ "punpckhwd %%xmm3,%%xmm9 \n"
+ "punpcklwd %%xmm2,%%xmm0 \n"
+ "punpcklwd %%xmm3,%%xmm1 \n"
+ "movdqa %%xmm8,%%xmm2 \n"
+ "movdqa %%xmm9,%%xmm3 \n"
+ "movdqa %%xmm4,%%xmm8 \n"
+ "movdqa %%xmm5,%%xmm9 \n"
+ "punpckhwd %%xmm6,%%xmm8 \n"
+ "punpckhwd %%xmm7,%%xmm9 \n"
+ "punpcklwd %%xmm6,%%xmm4 \n"
+ "punpcklwd %%xmm7,%%xmm5 \n"
+ "movdqa %%xmm8,%%xmm6 \n"
+ "movdqa %%xmm9,%%xmm7 \n"
+ // Third round of bit swap.
+ // Write to the destination pointer.
+ "movdqa %%xmm0,%%xmm8 \n"
+ "punpckldq %%xmm4,%%xmm0 \n"
+ "movlpd %%xmm0,(%1) \n" // Write back U channel
+ "movhpd %%xmm0,(%2) \n" // Write back V channel
+ "punpckhdq %%xmm4,%%xmm8 \n"
+ "movlpd %%xmm8,(%1,%5) \n"
+ "lea (%1,%5,2),%1 \n"
+ "movhpd %%xmm8,(%2,%6) \n"
+ "lea (%2,%6,2),%2 \n"
+ "movdqa %%xmm2,%%xmm8 \n"
+ "punpckldq %%xmm6,%%xmm2 \n"
+ "movlpd %%xmm2,(%1) \n"
+ "movhpd %%xmm2,(%2) \n"
+ "punpckhdq %%xmm6,%%xmm8 \n"
+ "movlpd %%xmm8,(%1,%5) \n"
+ "lea (%1,%5,2),%1 \n"
+ "movhpd %%xmm8,(%2,%6) \n"
+ "lea (%2,%6,2),%2 \n"
+ "movdqa %%xmm1,%%xmm8 \n"
+ "punpckldq %%xmm5,%%xmm1 \n"
+ "movlpd %%xmm1,(%1) \n"
+ "movhpd %%xmm1,(%2) \n"
+ "punpckhdq %%xmm5,%%xmm8 \n"
+ "movlpd %%xmm8,(%1,%5) \n"
+ "lea (%1,%5,2),%1 \n"
+ "movhpd %%xmm8,(%2,%6) \n"
+ "lea (%2,%6,2),%2 \n"
+ "movdqa %%xmm3,%%xmm8 \n"
+ "punpckldq %%xmm7,%%xmm3 \n"
+ "movlpd %%xmm3,(%1) \n"
+ "movhpd %%xmm3,(%2) \n"
+ "punpckhdq %%xmm7,%%xmm8 \n"
+ "sub $0x8,%3 \n"
+ "movlpd %%xmm8,(%1,%5) \n"
+ "lea (%1,%5,2),%1 \n"
+ "movhpd %%xmm8,(%2,%6) \n"
+ "lea (%2,%6,2),%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst_a), // %1
+ "+r"(dst_b), // %2
+ "+r"(width) // %3
+ : "r"((intptr_t)(src_stride)), // %4
+ "r"((intptr_t)(dst_stride_a)), // %5
+ "r"((intptr_t)(dst_stride_b)) // %6
+ : "memory", "cc",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9"
+);
+}
+#endif
+#endif
+
+#endif // defined(__x86_64__) || defined(__i386__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/rotate_mips.cc b/media/libaom/src/third_party/libyuv/source/rotate_mips.cc
new file mode 100644
index 000000000..efe6bd909
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate_mips.cc
@@ -0,0 +1,484 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+#include "libyuv/rotate_row.h"
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#if !defined(LIBYUV_DISABLE_MIPS) && \
+ defined(__mips_dsp) && (__mips_dsp_rev >= 2) && \
+ (_MIPS_SIM == _MIPS_SIM_ABI32)
+
+void TransposeWx8_MIPS_DSPR2(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ "sll $t2, %[src_stride], 0x1 \n" // src_stride x 2
+ "sll $t4, %[src_stride], 0x2 \n" // src_stride x 4
+ "sll $t9, %[src_stride], 0x3 \n" // src_stride x 8
+ "addu $t3, $t2, %[src_stride] \n"
+ "addu $t5, $t4, %[src_stride] \n"
+ "addu $t6, $t2, $t4 \n"
+ "andi $t0, %[dst], 0x3 \n"
+ "andi $t1, %[dst_stride], 0x3 \n"
+ "or $t0, $t0, $t1 \n"
+ "bnez $t0, 11f \n"
+ " subu $t7, $t9, %[src_stride] \n"
+// dst + dst_stride word aligned
+ "1: \n"
+ "lbu $t0, 0(%[src]) \n"
+ "lbux $t1, %[src_stride](%[src]) \n"
+ "lbux $t8, $t2(%[src]) \n"
+ "lbux $t9, $t3(%[src]) \n"
+ "sll $t1, $t1, 16 \n"
+ "sll $t9, $t9, 16 \n"
+ "or $t0, $t0, $t1 \n"
+ "or $t8, $t8, $t9 \n"
+ "precr.qb.ph $s0, $t8, $t0 \n"
+ "lbux $t0, $t4(%[src]) \n"
+ "lbux $t1, $t5(%[src]) \n"
+ "lbux $t8, $t6(%[src]) \n"
+ "lbux $t9, $t7(%[src]) \n"
+ "sll $t1, $t1, 16 \n"
+ "sll $t9, $t9, 16 \n"
+ "or $t0, $t0, $t1 \n"
+ "or $t8, $t8, $t9 \n"
+ "precr.qb.ph $s1, $t8, $t0 \n"
+ "sw $s0, 0(%[dst]) \n"
+ "addiu %[width], -1 \n"
+ "addiu %[src], 1 \n"
+ "sw $s1, 4(%[dst]) \n"
+ "bnez %[width], 1b \n"
+ " addu %[dst], %[dst], %[dst_stride] \n"
+ "b 2f \n"
+//dst + dst_stride unaligned
+ "11: \n"
+ "lbu $t0, 0(%[src]) \n"
+ "lbux $t1, %[src_stride](%[src]) \n"
+ "lbux $t8, $t2(%[src]) \n"
+ "lbux $t9, $t3(%[src]) \n"
+ "sll $t1, $t1, 16 \n"
+ "sll $t9, $t9, 16 \n"
+ "or $t0, $t0, $t1 \n"
+ "or $t8, $t8, $t9 \n"
+ "precr.qb.ph $s0, $t8, $t0 \n"
+ "lbux $t0, $t4(%[src]) \n"
+ "lbux $t1, $t5(%[src]) \n"
+ "lbux $t8, $t6(%[src]) \n"
+ "lbux $t9, $t7(%[src]) \n"
+ "sll $t1, $t1, 16 \n"
+ "sll $t9, $t9, 16 \n"
+ "or $t0, $t0, $t1 \n"
+ "or $t8, $t8, $t9 \n"
+ "precr.qb.ph $s1, $t8, $t0 \n"
+ "swr $s0, 0(%[dst]) \n"
+ "swl $s0, 3(%[dst]) \n"
+ "addiu %[width], -1 \n"
+ "addiu %[src], 1 \n"
+ "swr $s1, 4(%[dst]) \n"
+ "swl $s1, 7(%[dst]) \n"
+ "bnez %[width], 11b \n"
+ "addu %[dst], %[dst], %[dst_stride] \n"
+ "2: \n"
+ ".set pop \n"
+ :[src] "+r" (src),
+ [dst] "+r" (dst),
+ [width] "+r" (width)
+ :[src_stride] "r" (src_stride),
+ [dst_stride] "r" (dst_stride)
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9",
+ "s0", "s1"
+ );
+}
+
+void TransposeWx8_Fast_MIPS_DSPR2(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width) {
+ __asm__ __volatile__ (
+ ".set noat \n"
+ ".set push \n"
+ ".set noreorder \n"
+ "beqz %[width], 2f \n"
+ " sll $t2, %[src_stride], 0x1 \n" // src_stride x 2
+ "sll $t4, %[src_stride], 0x2 \n" // src_stride x 4
+ "sll $t9, %[src_stride], 0x3 \n" // src_stride x 8
+ "addu $t3, $t2, %[src_stride] \n"
+ "addu $t5, $t4, %[src_stride] \n"
+ "addu $t6, $t2, $t4 \n"
+
+ "srl $AT, %[width], 0x2 \n"
+ "andi $t0, %[dst], 0x3 \n"
+ "andi $t1, %[dst_stride], 0x3 \n"
+ "or $t0, $t0, $t1 \n"
+ "bnez $t0, 11f \n"
+ " subu $t7, $t9, %[src_stride] \n"
+// dst + dst_stride word aligned
+ "1: \n"
+ "lw $t0, 0(%[src]) \n"
+ "lwx $t1, %[src_stride](%[src]) \n"
+ "lwx $t8, $t2(%[src]) \n"
+ "lwx $t9, $t3(%[src]) \n"
+
+// t0 = | 30 | 20 | 10 | 00 |
+// t1 = | 31 | 21 | 11 | 01 |
+// t8 = | 32 | 22 | 12 | 02 |
+// t9 = | 33 | 23 | 13 | 03 |
+
+ "precr.qb.ph $s0, $t1, $t0 \n"
+ "precr.qb.ph $s1, $t9, $t8 \n"
+ "precrq.qb.ph $s2, $t1, $t0 \n"
+ "precrq.qb.ph $s3, $t9, $t8 \n"
+
+ // s0 = | 21 | 01 | 20 | 00 |
+ // s1 = | 23 | 03 | 22 | 02 |
+ // s2 = | 31 | 11 | 30 | 10 |
+ // s3 = | 33 | 13 | 32 | 12 |
+
+ "precr.qb.ph $s4, $s1, $s0 \n"
+ "precrq.qb.ph $s5, $s1, $s0 \n"
+ "precr.qb.ph $s6, $s3, $s2 \n"
+ "precrq.qb.ph $s7, $s3, $s2 \n"
+
+ // s4 = | 03 | 02 | 01 | 00 |
+ // s5 = | 23 | 22 | 21 | 20 |
+ // s6 = | 13 | 12 | 11 | 10 |
+ // s7 = | 33 | 32 | 31 | 30 |
+
+ "lwx $t0, $t4(%[src]) \n"
+ "lwx $t1, $t5(%[src]) \n"
+ "lwx $t8, $t6(%[src]) \n"
+ "lwx $t9, $t7(%[src]) \n"
+
+// t0 = | 34 | 24 | 14 | 04 |
+// t1 = | 35 | 25 | 15 | 05 |
+// t8 = | 36 | 26 | 16 | 06 |
+// t9 = | 37 | 27 | 17 | 07 |
+
+ "precr.qb.ph $s0, $t1, $t0 \n"
+ "precr.qb.ph $s1, $t9, $t8 \n"
+ "precrq.qb.ph $s2, $t1, $t0 \n"
+ "precrq.qb.ph $s3, $t9, $t8 \n"
+
+ // s0 = | 25 | 05 | 24 | 04 |
+ // s1 = | 27 | 07 | 26 | 06 |
+ // s2 = | 35 | 15 | 34 | 14 |
+ // s3 = | 37 | 17 | 36 | 16 |
+
+ "precr.qb.ph $t0, $s1, $s0 \n"
+ "precrq.qb.ph $t1, $s1, $s0 \n"
+ "precr.qb.ph $t8, $s3, $s2 \n"
+ "precrq.qb.ph $t9, $s3, $s2 \n"
+
+ // t0 = | 07 | 06 | 05 | 04 |
+ // t1 = | 27 | 26 | 25 | 24 |
+ // t8 = | 17 | 16 | 15 | 14 |
+ // t9 = | 37 | 36 | 35 | 34 |
+
+ "addu $s0, %[dst], %[dst_stride] \n"
+ "addu $s1, $s0, %[dst_stride] \n"
+ "addu $s2, $s1, %[dst_stride] \n"
+
+ "sw $s4, 0(%[dst]) \n"
+ "sw $t0, 4(%[dst]) \n"
+ "sw $s6, 0($s0) \n"
+ "sw $t8, 4($s0) \n"
+ "sw $s5, 0($s1) \n"
+ "sw $t1, 4($s1) \n"
+ "sw $s7, 0($s2) \n"
+ "sw $t9, 4($s2) \n"
+
+ "addiu $AT, -1 \n"
+ "addiu %[src], 4 \n"
+
+ "bnez $AT, 1b \n"
+ " addu %[dst], $s2, %[dst_stride] \n"
+ "b 2f \n"
+// dst + dst_stride unaligned
+ "11: \n"
+ "lw $t0, 0(%[src]) \n"
+ "lwx $t1, %[src_stride](%[src]) \n"
+ "lwx $t8, $t2(%[src]) \n"
+ "lwx $t9, $t3(%[src]) \n"
+
+// t0 = | 30 | 20 | 10 | 00 |
+// t1 = | 31 | 21 | 11 | 01 |
+// t8 = | 32 | 22 | 12 | 02 |
+// t9 = | 33 | 23 | 13 | 03 |
+
+ "precr.qb.ph $s0, $t1, $t0 \n"
+ "precr.qb.ph $s1, $t9, $t8 \n"
+ "precrq.qb.ph $s2, $t1, $t0 \n"
+ "precrq.qb.ph $s3, $t9, $t8 \n"
+
+ // s0 = | 21 | 01 | 20 | 00 |
+ // s1 = | 23 | 03 | 22 | 02 |
+ // s2 = | 31 | 11 | 30 | 10 |
+ // s3 = | 33 | 13 | 32 | 12 |
+
+ "precr.qb.ph $s4, $s1, $s0 \n"
+ "precrq.qb.ph $s5, $s1, $s0 \n"
+ "precr.qb.ph $s6, $s3, $s2 \n"
+ "precrq.qb.ph $s7, $s3, $s2 \n"
+
+ // s4 = | 03 | 02 | 01 | 00 |
+ // s5 = | 23 | 22 | 21 | 20 |
+ // s6 = | 13 | 12 | 11 | 10 |
+ // s7 = | 33 | 32 | 31 | 30 |
+
+ "lwx $t0, $t4(%[src]) \n"
+ "lwx $t1, $t5(%[src]) \n"
+ "lwx $t8, $t6(%[src]) \n"
+ "lwx $t9, $t7(%[src]) \n"
+
+// t0 = | 34 | 24 | 14 | 04 |
+// t1 = | 35 | 25 | 15 | 05 |
+// t8 = | 36 | 26 | 16 | 06 |
+// t9 = | 37 | 27 | 17 | 07 |
+
+ "precr.qb.ph $s0, $t1, $t0 \n"
+ "precr.qb.ph $s1, $t9, $t8 \n"
+ "precrq.qb.ph $s2, $t1, $t0 \n"
+ "precrq.qb.ph $s3, $t9, $t8 \n"
+
+ // s0 = | 25 | 05 | 24 | 04 |
+ // s1 = | 27 | 07 | 26 | 06 |
+ // s2 = | 35 | 15 | 34 | 14 |
+ // s3 = | 37 | 17 | 36 | 16 |
+
+ "precr.qb.ph $t0, $s1, $s0 \n"
+ "precrq.qb.ph $t1, $s1, $s0 \n"
+ "precr.qb.ph $t8, $s3, $s2 \n"
+ "precrq.qb.ph $t9, $s3, $s2 \n"
+
+ // t0 = | 07 | 06 | 05 | 04 |
+ // t1 = | 27 | 26 | 25 | 24 |
+ // t8 = | 17 | 16 | 15 | 14 |
+ // t9 = | 37 | 36 | 35 | 34 |
+
+ "addu $s0, %[dst], %[dst_stride] \n"
+ "addu $s1, $s0, %[dst_stride] \n"
+ "addu $s2, $s1, %[dst_stride] \n"
+
+ "swr $s4, 0(%[dst]) \n"
+ "swl $s4, 3(%[dst]) \n"
+ "swr $t0, 4(%[dst]) \n"
+ "swl $t0, 7(%[dst]) \n"
+ "swr $s6, 0($s0) \n"
+ "swl $s6, 3($s0) \n"
+ "swr $t8, 4($s0) \n"
+ "swl $t8, 7($s0) \n"
+ "swr $s5, 0($s1) \n"
+ "swl $s5, 3($s1) \n"
+ "swr $t1, 4($s1) \n"
+ "swl $t1, 7($s1) \n"
+ "swr $s7, 0($s2) \n"
+ "swl $s7, 3($s2) \n"
+ "swr $t9, 4($s2) \n"
+ "swl $t9, 7($s2) \n"
+
+ "addiu $AT, -1 \n"
+ "addiu %[src], 4 \n"
+
+ "bnez $AT, 11b \n"
+ " addu %[dst], $s2, %[dst_stride] \n"
+ "2: \n"
+ ".set pop \n"
+ ".set at \n"
+ :[src] "+r" (src),
+ [dst] "+r" (dst),
+ [width] "+r" (width)
+ :[src_stride] "r" (src_stride),
+ [dst_stride] "r" (dst_stride)
+ : "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "t9",
+ "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7"
+ );
+}
+
+void TransposeUVWx8_MIPS_DSPR2(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ "beqz %[width], 2f \n"
+ " sll $t2, %[src_stride], 0x1 \n" // src_stride x 2
+ "sll $t4, %[src_stride], 0x2 \n" // src_stride x 4
+ "sll $t9, %[src_stride], 0x3 \n" // src_stride x 8
+ "addu $t3, $t2, %[src_stride] \n"
+ "addu $t5, $t4, %[src_stride] \n"
+ "addu $t6, $t2, $t4 \n"
+ "subu $t7, $t9, %[src_stride] \n"
+ "srl $t1, %[width], 1 \n"
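+    // loop count is width / 2: each pass consumes 4 interleaved bytes
+    // (two UV pairs) per source row and emits two transposed rows into
+    // each of dst_a and dst_b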
+
+// check word alignment for dst_a, dst_b, dst_stride_a and dst_stride_b
+ "andi $t0, %[dst_a], 0x3 \n"
+ "andi $t8, %[dst_b], 0x3 \n"
+ "or $t0, $t0, $t8 \n"
+ "andi $t8, %[dst_stride_a], 0x3 \n"
+ "andi $s5, %[dst_stride_b], 0x3 \n"
+ "or $t8, $t8, $s5 \n"
+ "or $t0, $t0, $t8 \n"
+ "bnez $t0, 11f \n"
+ " nop \n"
+// dst + dst_stride word aligned (both a & b dst addresses)
+ "1: \n"
+ "lw $t0, 0(%[src]) \n" // |B0|A0|b0|a0|
+ "lwx $t8, %[src_stride](%[src]) \n" // |B1|A1|b1|a1|
+ "addu $s5, %[dst_a], %[dst_stride_a] \n"
+ "lwx $t9, $t2(%[src]) \n" // |B2|A2|b2|a2|
+ "lwx $s0, $t3(%[src]) \n" // |B3|A3|b3|a3|
+ "addu $s6, %[dst_b], %[dst_stride_b] \n"
+
+ "precrq.ph.w $s1, $t8, $t0 \n" // |B1|A1|B0|A0|
+ "precrq.ph.w $s2, $s0, $t9 \n" // |B3|A3|B2|A2|
+ "precr.qb.ph $s3, $s2, $s1 \n" // |A3|A2|A1|A0|
+ "precrq.qb.ph $s4, $s2, $s1 \n" // |B3|B2|B1|B0|
+
+ "sll $t0, $t0, 16 \n"
+ "packrl.ph $s1, $t8, $t0 \n" // |b1|a1|b0|a0|
+ "sll $t9, $t9, 16 \n"
+ "packrl.ph $s2, $s0, $t9 \n" // |b3|a3|b2|a2|
+
+ "sw $s3, 0($s5) \n"
+ "sw $s4, 0($s6) \n"
+
+ "precr.qb.ph $s3, $s2, $s1 \n" // |a3|a2|a1|a0|
+ "precrq.qb.ph $s4, $s2, $s1 \n" // |b3|b2|b1|b0|
+
+ "lwx $t0, $t4(%[src]) \n" // |B4|A4|b4|a4|
+ "lwx $t8, $t5(%[src]) \n" // |B5|A5|b5|a5|
+ "lwx $t9, $t6(%[src]) \n" // |B6|A6|b6|a6|
+ "lwx $s0, $t7(%[src]) \n" // |B7|A7|b7|a7|
+ "sw $s3, 0(%[dst_a]) \n"
+ "sw $s4, 0(%[dst_b]) \n"
+
+ "precrq.ph.w $s1, $t8, $t0 \n" // |B5|A5|B4|A4|
+    "precrq.ph.w      $s2, $s0, $t9        \n"  // |B7|A7|B6|A6|
+ "precr.qb.ph $s3, $s2, $s1 \n" // |A7|A6|A5|A4|
+ "precrq.qb.ph $s4, $s2, $s1 \n" // |B7|B6|B5|B4|
+
+ "sll $t0, $t0, 16 \n"
+ "packrl.ph $s1, $t8, $t0 \n" // |b5|a5|b4|a4|
+ "sll $t9, $t9, 16 \n"
+ "packrl.ph $s2, $s0, $t9 \n" // |b7|a7|b6|a6|
+ "sw $s3, 4($s5) \n"
+ "sw $s4, 4($s6) \n"
+
+ "precr.qb.ph $s3, $s2, $s1 \n" // |a7|a6|a5|a4|
+ "precrq.qb.ph $s4, $s2, $s1 \n" // |b7|b6|b5|b4|
+
+ "addiu %[src], 4 \n"
+ "addiu $t1, -1 \n"
+ "sll $t0, %[dst_stride_a], 1 \n"
+ "sll $t8, %[dst_stride_b], 1 \n"
+ "sw $s3, 4(%[dst_a]) \n"
+ "sw $s4, 4(%[dst_b]) \n"
+ "addu %[dst_a], %[dst_a], $t0 \n"
+ "bnez $t1, 1b \n"
+ " addu %[dst_b], %[dst_b], $t8 \n"
+ "b 2f \n"
+ " nop \n"
+
+// dst_a or dst_b or dst_stride_a or dst_stride_b not word aligned
+ "11: \n"
+ "lw $t0, 0(%[src]) \n" // |B0|A0|b0|a0|
+ "lwx $t8, %[src_stride](%[src]) \n" // |B1|A1|b1|a1|
+ "addu $s5, %[dst_a], %[dst_stride_a] \n"
+ "lwx $t9, $t2(%[src]) \n" // |B2|A2|b2|a2|
+ "lwx $s0, $t3(%[src]) \n" // |B3|A3|b3|a3|
+ "addu $s6, %[dst_b], %[dst_stride_b] \n"
+
+ "precrq.ph.w $s1, $t8, $t0 \n" // |B1|A1|B0|A0|
+ "precrq.ph.w $s2, $s0, $t9 \n" // |B3|A3|B2|A2|
+ "precr.qb.ph $s3, $s2, $s1 \n" // |A3|A2|A1|A0|
+ "precrq.qb.ph $s4, $s2, $s1 \n" // |B3|B2|B1|B0|
+
+ "sll $t0, $t0, 16 \n"
+ "packrl.ph $s1, $t8, $t0 \n" // |b1|a1|b0|a0|
+ "sll $t9, $t9, 16 \n"
+ "packrl.ph $s2, $s0, $t9 \n" // |b3|a3|b2|a2|
+
+ "swr $s3, 0($s5) \n"
+ "swl $s3, 3($s5) \n"
+ "swr $s4, 0($s6) \n"
+ "swl $s4, 3($s6) \n"
+
+ "precr.qb.ph $s3, $s2, $s1 \n" // |a3|a2|a1|a0|
+ "precrq.qb.ph $s4, $s2, $s1 \n" // |b3|b2|b1|b0|
+
+ "lwx $t0, $t4(%[src]) \n" // |B4|A4|b4|a4|
+ "lwx $t8, $t5(%[src]) \n" // |B5|A5|b5|a5|
+ "lwx $t9, $t6(%[src]) \n" // |B6|A6|b6|a6|
+ "lwx $s0, $t7(%[src]) \n" // |B7|A7|b7|a7|
+ "swr $s3, 0(%[dst_a]) \n"
+ "swl $s3, 3(%[dst_a]) \n"
+ "swr $s4, 0(%[dst_b]) \n"
+ "swl $s4, 3(%[dst_b]) \n"
+
+ "precrq.ph.w $s1, $t8, $t0 \n" // |B5|A5|B4|A4|
+    "precrq.ph.w      $s2, $s0, $t9        \n"  // |B7|A7|B6|A6|
+ "precr.qb.ph $s3, $s2, $s1 \n" // |A7|A6|A5|A4|
+ "precrq.qb.ph $s4, $s2, $s1 \n" // |B7|B6|B5|B4|
+
+ "sll $t0, $t0, 16 \n"
+ "packrl.ph $s1, $t8, $t0 \n" // |b5|a5|b4|a4|
+ "sll $t9, $t9, 16 \n"
+ "packrl.ph $s2, $s0, $t9 \n" // |b7|a7|b6|a6|
+
+ "swr $s3, 4($s5) \n"
+ "swl $s3, 7($s5) \n"
+ "swr $s4, 4($s6) \n"
+ "swl $s4, 7($s6) \n"
+
+ "precr.qb.ph $s3, $s2, $s1 \n" // |a7|a6|a5|a4|
+ "precrq.qb.ph $s4, $s2, $s1 \n" // |b7|b6|b5|b4|
+
+ "addiu %[src], 4 \n"
+ "addiu $t1, -1 \n"
+ "sll $t0, %[dst_stride_a], 1 \n"
+ "sll $t8, %[dst_stride_b], 1 \n"
+ "swr $s3, 4(%[dst_a]) \n"
+ "swl $s3, 7(%[dst_a]) \n"
+ "swr $s4, 4(%[dst_b]) \n"
+ "swl $s4, 7(%[dst_b]) \n"
+ "addu %[dst_a], %[dst_a], $t0 \n"
+ "bnez $t1, 11b \n"
+ " addu %[dst_b], %[dst_b], $t8 \n"
+
+ "2: \n"
+ ".set pop \n"
+ : [src] "+r" (src),
+ [dst_a] "+r" (dst_a),
+ [dst_b] "+r" (dst_b),
+ [width] "+r" (width),
+ [src_stride] "+r" (src_stride)
+ : [dst_stride_a] "r" (dst_stride_a),
+ [dst_stride_b] "r" (dst_stride_b)
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9",
+ "s0", "s1", "s2", "s3",
+ "s4", "s5", "s6"
+ );
+}
+
+#endif // defined(__mips_dsp) && (__mips_dsp_rev >= 2)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/rotate_neon.cc b/media/libaom/src/third_party/libyuv/source/rotate_neon.cc
new file mode 100644
index 000000000..76043b3b3
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate_neon.cc
@@ -0,0 +1,535 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+#include "libyuv/rotate_row.h"
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
+ !defined(__aarch64__)
+
+static uvec8 kVTbl4x4Transpose =
+ { 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 };
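+
+// The table above feeds vtbl in the 4x8 residual path: reading a 16-byte
+// 4x4 tile at bytes 0,4,8,12, then 1,5,9,13, and so on emits the tile in
+// column-major order, i.e. transposed.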
+
+void TransposeWx8_NEON(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride,
+ int width) {
+ const uint8* src_temp = NULL;
+ asm volatile (
+ // loops are on blocks of 8. loop will stop when
+ // counter gets to or below 0. starting the counter
+    // at w-8 allows for this
+ "sub %5, #8 \n"
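+    // e.g. width = 13: the counter starts at 13 - 8 = 5; one 8x8 pass
+    // takes it to -3 so the loop exits, and adding 8 back leaves 5
+    // residual columns for the 4x8 and 1x8 blocks below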
+
+ // handle 8x8 blocks. this should be the majority of the plane
+ ".p2align 2 \n"
+ "1: \n"
+ "mov %0, %1 \n"
+
+ MEMACCESS(0)
+ "vld1.8 {d0}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.8 {d1}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.8 {d2}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.8 {d3}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.8 {d4}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.8 {d5}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.8 {d6}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.8 {d7}, [%0] \n"
+
+ "vtrn.8 d1, d0 \n"
+ "vtrn.8 d3, d2 \n"
+ "vtrn.8 d5, d4 \n"
+ "vtrn.8 d7, d6 \n"
+
+ "vtrn.16 d1, d3 \n"
+ "vtrn.16 d0, d2 \n"
+ "vtrn.16 d5, d7 \n"
+ "vtrn.16 d4, d6 \n"
+
+ "vtrn.32 d1, d5 \n"
+ "vtrn.32 d0, d4 \n"
+ "vtrn.32 d3, d7 \n"
+ "vtrn.32 d2, d6 \n"
+
+ "vrev16.8 q0, q0 \n"
+ "vrev16.8 q1, q1 \n"
+ "vrev16.8 q2, q2 \n"
+ "vrev16.8 q3, q3 \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(0)
+ "vst1.8 {d1}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d0}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d3}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d2}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d5}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d4}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d7}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d6}, [%0] \n"
+
+ "add %1, #8 \n" // src += 8
+ "add %3, %3, %4, lsl #3 \n" // dst += 8 * dst_stride
+ "subs %5, #8 \n" // w -= 8
+ "bge 1b \n"
+
+ // add 8 back to counter. if the result is 0 there are
+ // no residuals.
+ "adds %5, #8 \n"
+ "beq 4f \n"
+
+ // some residual, so between 1 and 7 lines left to transpose
+ "cmp %5, #2 \n"
+ "blt 3f \n"
+
+ "cmp %5, #4 \n"
+ "blt 2f \n"
+
+ // 4x8 block
+ "mov %0, %1 \n"
+ MEMACCESS(0)
+ "vld1.32 {d0[0]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.32 {d0[1]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.32 {d1[0]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.32 {d1[1]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.32 {d2[0]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.32 {d2[1]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.32 {d3[0]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.32 {d3[1]}, [%0] \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(6)
+ "vld1.8 {q3}, [%6] \n"
+
+ "vtbl.8 d4, {d0, d1}, d6 \n"
+ "vtbl.8 d5, {d0, d1}, d7 \n"
+ "vtbl.8 d0, {d2, d3}, d6 \n"
+ "vtbl.8 d1, {d2, d3}, d7 \n"
+
+ // TODO(frkoenig): Rework shuffle above to
+ // write out with 4 instead of 8 writes.
+ MEMACCESS(0)
+ "vst1.32 {d4[0]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d4[1]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d5[0]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d5[1]}, [%0] \n"
+
+ "add %0, %3, #4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d0[0]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d0[1]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d1[0]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d1[1]}, [%0] \n"
+
+ "add %1, #4 \n" // src += 4
+ "add %3, %3, %4, lsl #2 \n" // dst += 4 * dst_stride
+ "subs %5, #4 \n" // w -= 4
+ "beq 4f \n"
+
+ // some residual, check to see if it includes a 2x8 block,
+ // or less
+ "cmp %5, #2 \n"
+ "blt 3f \n"
+
+ // 2x8 block
+ "2: \n"
+ "mov %0, %1 \n"
+ MEMACCESS(0)
+ "vld1.16 {d0[0]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.16 {d1[0]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.16 {d0[1]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.16 {d1[1]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.16 {d0[2]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.16 {d1[2]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.16 {d0[3]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.16 {d1[3]}, [%0] \n"
+
+ "vtrn.8 d0, d1 \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(0)
+ "vst1.64 {d0}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.64 {d1}, [%0] \n"
+
+ "add %1, #2 \n" // src += 2
+ "add %3, %3, %4, lsl #1 \n" // dst += 2 * dst_stride
+ "subs %5, #2 \n" // w -= 2
+ "beq 4f \n"
+
+ // 1x8 block
+ "3: \n"
+ MEMACCESS(1)
+ "vld1.8 {d0[0]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld1.8 {d0[1]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld1.8 {d0[2]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld1.8 {d0[3]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld1.8 {d0[4]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld1.8 {d0[5]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld1.8 {d0[6]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld1.8 {d0[7]}, [%1] \n"
+
+ MEMACCESS(3)
+ "vst1.64 {d0}, [%3] \n"
+
+ "4: \n"
+
+ : "+r"(src_temp), // %0
+ "+r"(src), // %1
+ "+r"(src_stride), // %2
+ "+r"(dst), // %3
+ "+r"(dst_stride), // %4
+ "+r"(width) // %5
+ : "r"(&kVTbl4x4Transpose) // %6
+ : "memory", "cc", "q0", "q1", "q2", "q3"
+ );
+}
+
+static uvec8 kVTbl4x4TransposeDi =
+ { 0, 8, 1, 9, 2, 10, 3, 11, 4, 12, 5, 13, 6, 14, 7, 15 };
+
+void TransposeUVWx8_NEON(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width) {
+ const uint8* src_temp = NULL;
+ asm volatile (
+ // loops are on blocks of 8. loop will stop when
+ // counter gets to or below 0. starting the counter
+    // at w-8 allows for this
+ "sub %7, #8 \n"
+
+ // handle 8x8 blocks. this should be the majority of the plane
+ ".p2align 2 \n"
+ "1: \n"
+ "mov %0, %1 \n"
+
+ MEMACCESS(0)
+ "vld2.8 {d0, d1}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.8 {d2, d3}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.8 {d4, d5}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.8 {d6, d7}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.8 {d16, d17}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.8 {d18, d19}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.8 {d20, d21}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.8 {d22, d23}, [%0] \n"
+
+ "vtrn.8 q1, q0 \n"
+ "vtrn.8 q3, q2 \n"
+ "vtrn.8 q9, q8 \n"
+ "vtrn.8 q11, q10 \n"
+
+ "vtrn.16 q1, q3 \n"
+ "vtrn.16 q0, q2 \n"
+ "vtrn.16 q9, q11 \n"
+ "vtrn.16 q8, q10 \n"
+
+ "vtrn.32 q1, q9 \n"
+ "vtrn.32 q0, q8 \n"
+ "vtrn.32 q3, q11 \n"
+ "vtrn.32 q2, q10 \n"
+
+ "vrev16.8 q0, q0 \n"
+ "vrev16.8 q1, q1 \n"
+ "vrev16.8 q2, q2 \n"
+ "vrev16.8 q3, q3 \n"
+ "vrev16.8 q8, q8 \n"
+ "vrev16.8 q9, q9 \n"
+ "vrev16.8 q10, q10 \n"
+ "vrev16.8 q11, q11 \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(0)
+ "vst1.8 {d2}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d0}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d6}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d4}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d18}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d16}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d22}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.8 {d20}, [%0] \n"
+
+ "mov %0, %5 \n"
+
+ MEMACCESS(0)
+ "vst1.8 {d3}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.8 {d1}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.8 {d7}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.8 {d5}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.8 {d19}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.8 {d17}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.8 {d23}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.8 {d21}, [%0] \n"
+
+ "add %1, #8*2 \n" // src += 8*2
+ "add %3, %3, %4, lsl #3 \n" // dst_a += 8 * dst_stride_a
+ "add %5, %5, %6, lsl #3 \n" // dst_b += 8 * dst_stride_b
+ "subs %7, #8 \n" // w -= 8
+ "bge 1b \n"
+
+ // add 8 back to counter. if the result is 0 there are
+ // no residuals.
+ "adds %7, #8 \n"
+ "beq 4f \n"
+
+ // some residual, so between 1 and 7 lines left to transpose
+ "cmp %7, #2 \n"
+ "blt 3f \n"
+
+ "cmp %7, #4 \n"
+ "blt 2f \n"
+
+ // TODO(frkoenig): Clean this up
+ // 4x8 block
+ "mov %0, %1 \n"
+ MEMACCESS(0)
+ "vld1.64 {d0}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.64 {d1}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.64 {d2}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.64 {d3}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.64 {d4}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.64 {d5}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.64 {d6}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld1.64 {d7}, [%0] \n"
+
+ MEMACCESS(8)
+ "vld1.8 {q15}, [%8] \n"
+
+ "vtrn.8 q0, q1 \n"
+ "vtrn.8 q2, q3 \n"
+
+ "vtbl.8 d16, {d0, d1}, d30 \n"
+ "vtbl.8 d17, {d0, d1}, d31 \n"
+ "vtbl.8 d18, {d2, d3}, d30 \n"
+ "vtbl.8 d19, {d2, d3}, d31 \n"
+ "vtbl.8 d20, {d4, d5}, d30 \n"
+ "vtbl.8 d21, {d4, d5}, d31 \n"
+ "vtbl.8 d22, {d6, d7}, d30 \n"
+ "vtbl.8 d23, {d6, d7}, d31 \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(0)
+ "vst1.32 {d16[0]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d16[1]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d17[0]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d17[1]}, [%0], %4 \n"
+
+ "add %0, %3, #4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d20[0]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d20[1]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d21[0]}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d21[1]}, [%0] \n"
+
+ "mov %0, %5 \n"
+
+ MEMACCESS(0)
+ "vst1.32 {d18[0]}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.32 {d18[1]}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.32 {d19[0]}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.32 {d19[1]}, [%0], %6 \n"
+
+ "add %0, %5, #4 \n"
+ MEMACCESS(0)
+ "vst1.32 {d22[0]}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.32 {d22[1]}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.32 {d23[0]}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.32 {d23[1]}, [%0] \n"
+
+ "add %1, #4*2 \n" // src += 4 * 2
+ "add %3, %3, %4, lsl #2 \n" // dst_a += 4 * dst_stride_a
+ "add %5, %5, %6, lsl #2 \n" // dst_b += 4 * dst_stride_b
+ "subs %7, #4 \n" // w -= 4
+ "beq 4f \n"
+
+ // some residual, check to see if it includes a 2x8 block,
+ // or less
+ "cmp %7, #2 \n"
+ "blt 3f \n"
+
+ // 2x8 block
+ "2: \n"
+ "mov %0, %1 \n"
+ MEMACCESS(0)
+ "vld2.16 {d0[0], d2[0]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.16 {d1[0], d3[0]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.16 {d0[1], d2[1]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.16 {d1[1], d3[1]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.16 {d0[2], d2[2]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.16 {d1[2], d3[2]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.16 {d0[3], d2[3]}, [%0], %2 \n"
+ MEMACCESS(0)
+ "vld2.16 {d1[3], d3[3]}, [%0] \n"
+
+ "vtrn.8 d0, d1 \n"
+ "vtrn.8 d2, d3 \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(0)
+ "vst1.64 {d0}, [%0], %4 \n"
+ MEMACCESS(0)
+ "vst1.64 {d2}, [%0] \n"
+
+ "mov %0, %5 \n"
+
+ MEMACCESS(0)
+ "vst1.64 {d1}, [%0], %6 \n"
+ MEMACCESS(0)
+ "vst1.64 {d3}, [%0] \n"
+
+ "add %1, #2*2 \n" // src += 2 * 2
+ "add %3, %3, %4, lsl #1 \n" // dst_a += 2 * dst_stride_a
+ "add %5, %5, %6, lsl #1 \n" // dst_b += 2 * dst_stride_b
+ "subs %7, #2 \n" // w -= 2
+ "beq 4f \n"
+
+ // 1x8 block
+ "3: \n"
+ MEMACCESS(1)
+ "vld2.8 {d0[0], d1[0]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld2.8 {d0[1], d1[1]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld2.8 {d0[2], d1[2]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld2.8 {d0[3], d1[3]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld2.8 {d0[4], d1[4]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld2.8 {d0[5], d1[5]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld2.8 {d0[6], d1[6]}, [%1], %2 \n"
+ MEMACCESS(1)
+ "vld2.8 {d0[7], d1[7]}, [%1] \n"
+
+ MEMACCESS(3)
+ "vst1.64 {d0}, [%3] \n"
+ MEMACCESS(5)
+ "vst1.64 {d1}, [%5] \n"
+
+ "4: \n"
+
+ : "+r"(src_temp), // %0
+ "+r"(src), // %1
+ "+r"(src_stride), // %2
+ "+r"(dst_a), // %3
+ "+r"(dst_stride_a), // %4
+ "+r"(dst_b), // %5
+ "+r"(dst_stride_b), // %6
+ "+r"(width) // %7
+ : "r"(&kVTbl4x4TransposeDi) // %8
+ : "memory", "cc",
+ "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
+ );
+}
+#endif // defined(__ARM_NEON__) && !defined(__aarch64__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/rotate_neon64.cc b/media/libaom/src/third_party/libyuv/source/rotate_neon64.cc
new file mode 100644
index 000000000..f52c082b3
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate_neon64.cc
@@ -0,0 +1,543 @@
+/*
+ * Copyright 2014 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+#include "libyuv/rotate_row.h"
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC Neon armv8 64 bit.
+#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
+
+static uvec8 kVTbl4x4Transpose =
+ { 0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15 };
+
+void TransposeWx8_NEON(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width) {
+ const uint8* src_temp = NULL;
+ int64 width64 = (int64) width; // Work around clang 3.4 warning.
+ asm volatile (
+ // loops are on blocks of 8. loop will stop when
+ // counter gets to or below 0. starting the counter
+    // at w-8 allows for this
+ "sub %3, %3, #8 \n"
+
+ // handle 8x8 blocks. this should be the majority of the plane
+ "1: \n"
+ "mov %0, %1 \n"
+
+ MEMACCESS(0)
+ "ld1 {v0.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v2.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v3.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v4.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v5.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v6.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v7.8b}, [%0] \n"
+
+ "trn2 v16.8b, v0.8b, v1.8b \n"
+ "trn1 v17.8b, v0.8b, v1.8b \n"
+ "trn2 v18.8b, v2.8b, v3.8b \n"
+ "trn1 v19.8b, v2.8b, v3.8b \n"
+ "trn2 v20.8b, v4.8b, v5.8b \n"
+ "trn1 v21.8b, v4.8b, v5.8b \n"
+ "trn2 v22.8b, v6.8b, v7.8b \n"
+ "trn1 v23.8b, v6.8b, v7.8b \n"
+
+ "trn2 v3.4h, v17.4h, v19.4h \n"
+ "trn1 v1.4h, v17.4h, v19.4h \n"
+ "trn2 v2.4h, v16.4h, v18.4h \n"
+ "trn1 v0.4h, v16.4h, v18.4h \n"
+ "trn2 v7.4h, v21.4h, v23.4h \n"
+ "trn1 v5.4h, v21.4h, v23.4h \n"
+ "trn2 v6.4h, v20.4h, v22.4h \n"
+ "trn1 v4.4h, v20.4h, v22.4h \n"
+
+ "trn2 v21.2s, v1.2s, v5.2s \n"
+ "trn1 v17.2s, v1.2s, v5.2s \n"
+ "trn2 v20.2s, v0.2s, v4.2s \n"
+ "trn1 v16.2s, v0.2s, v4.2s \n"
+ "trn2 v23.2s, v3.2s, v7.2s \n"
+ "trn1 v19.2s, v3.2s, v7.2s \n"
+ "trn2 v22.2s, v2.2s, v6.2s \n"
+ "trn1 v18.2s, v2.2s, v6.2s \n"
+
+ "mov %0, %2 \n"
+
+ MEMACCESS(0)
+ "st1 {v17.8b}, [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v16.8b}, [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v19.8b}, [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v18.8b}, [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v21.8b}, [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v20.8b}, [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v23.8b}, [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v22.8b}, [%0] \n"
+
+ "add %1, %1, #8 \n" // src += 8
+ "add %2, %2, %6, lsl #3 \n" // dst += 8 * dst_stride
+ "subs %3, %3, #8 \n" // w -= 8
+ "b.ge 1b \n"
+
+ // add 8 back to counter. if the result is 0 there are
+ // no residuals.
+ "adds %3, %3, #8 \n"
+ "b.eq 4f \n"
+
+ // some residual, so between 1 and 7 lines left to transpose
+ "cmp %3, #2 \n"
+ "b.lt 3f \n"
+
+ "cmp %3, #4 \n"
+ "b.lt 2f \n"
+
+ // 4x8 block
+ "mov %0, %1 \n"
+ MEMACCESS(0)
+ "ld1 {v0.s}[0], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v0.s}[1], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v0.s}[2], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v0.s}[3], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.s}[0], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.s}[1], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.s}[2], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.s}[3], [%0] \n"
+
+ "mov %0, %2 \n"
+
+ MEMACCESS(4)
+ "ld1 {v2.16b}, [%4] \n"
+
+ "tbl v3.16b, {v0.16b}, v2.16b \n"
+ "tbl v0.16b, {v1.16b}, v2.16b \n"
+
+ // TODO(frkoenig): Rework shuffle above to
+ // write out with 4 instead of 8 writes.
+ MEMACCESS(0)
+ "st1 {v3.s}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v3.s}[1], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v3.s}[2], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v3.s}[3], [%0] \n"
+
+ "add %0, %2, #4 \n"
+ MEMACCESS(0)
+ "st1 {v0.s}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v0.s}[1], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v0.s}[2], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v0.s}[3], [%0] \n"
+
+ "add %1, %1, #4 \n" // src += 4
+ "add %2, %2, %6, lsl #2 \n" // dst += 4 * dst_stride
+ "subs %3, %3, #4 \n" // w -= 4
+ "b.eq 4f \n"
+
+ // some residual, check to see if it includes a 2x8 block,
+ // or less
+ "cmp %3, #2 \n"
+ "b.lt 3f \n"
+
+ // 2x8 block
+ "2: \n"
+ "mov %0, %1 \n"
+ MEMACCESS(0)
+ "ld1 {v0.h}[0], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.h}[0], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v0.h}[1], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.h}[1], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v0.h}[2], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.h}[2], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v0.h}[3], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.h}[3], [%0] \n"
+
+ "trn2 v2.8b, v0.8b, v1.8b \n"
+ "trn1 v3.8b, v0.8b, v1.8b \n"
+
+ "mov %0, %2 \n"
+
+ MEMACCESS(0)
+ "st1 {v3.8b}, [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v2.8b}, [%0] \n"
+
+ "add %1, %1, #2 \n" // src += 2
+ "add %2, %2, %6, lsl #1 \n" // dst += 2 * dst_stride
+ "subs %3, %3, #2 \n" // w -= 2
+ "b.eq 4f \n"
+
+ // 1x8 block
+ "3: \n"
+ MEMACCESS(1)
+ "ld1 {v0.b}[0], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld1 {v0.b}[1], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld1 {v0.b}[2], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld1 {v0.b}[3], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld1 {v0.b}[4], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld1 {v0.b}[5], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld1 {v0.b}[6], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld1 {v0.b}[7], [%1] \n"
+
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2] \n"
+
+ "4: \n"
+
+ : "+r"(src_temp), // %0
+ "+r"(src), // %1
+ "+r"(dst), // %2
+ "+r"(width64) // %3
+ : "r"(&kVTbl4x4Transpose), // %4
+ "r"(static_cast<ptrdiff_t>(src_stride)), // %5
+ "r"(static_cast<ptrdiff_t>(dst_stride)) // %6
+ : "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16",
+ "v17", "v18", "v19", "v20", "v21", "v22", "v23"
+ );
+}
+
+static uint8 kVTbl4x4TransposeDi[32] =
+ { 0, 16, 32, 48, 2, 18, 34, 50, 4, 20, 36, 52, 6, 22, 38, 54,
+ 1, 17, 33, 49, 3, 19, 35, 51, 5, 21, 37, 53, 7, 23, 39, 55};
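+
+// The 32-byte table above drives the two-register tbl lookups in the 4x8
+// residual path: the first half (0, 16, 32, 48, ...) gathers the even (U)
+// bytes of four row registers column by column, and the second half
+// (1, 17, 33, 49, ...) the odd (V) bytes, so the lookup transposes and
+// de-interleaves at once.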
+
+void TransposeUVWx8_NEON(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int width) {
+ const uint8* src_temp = NULL;
+ int64 width64 = (int64) width; // Work around clang 3.4 warning.
+ asm volatile (
+ // loops are on blocks of 8. loop will stop when
+ // counter gets to or below 0. starting the counter
+    // at w-8 allows for this
+ "sub %4, %4, #8 \n"
+
+ // handle 8x8 blocks. this should be the majority of the plane
+ "1: \n"
+ "mov %0, %1 \n"
+
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.16b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v2.16b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v3.16b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v4.16b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v5.16b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v6.16b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v7.16b}, [%0] \n"
+
+ "trn1 v16.16b, v0.16b, v1.16b \n"
+ "trn2 v17.16b, v0.16b, v1.16b \n"
+ "trn1 v18.16b, v2.16b, v3.16b \n"
+ "trn2 v19.16b, v2.16b, v3.16b \n"
+ "trn1 v20.16b, v4.16b, v5.16b \n"
+ "trn2 v21.16b, v4.16b, v5.16b \n"
+ "trn1 v22.16b, v6.16b, v7.16b \n"
+ "trn2 v23.16b, v6.16b, v7.16b \n"
+
+ "trn1 v0.8h, v16.8h, v18.8h \n"
+ "trn2 v1.8h, v16.8h, v18.8h \n"
+ "trn1 v2.8h, v20.8h, v22.8h \n"
+ "trn2 v3.8h, v20.8h, v22.8h \n"
+ "trn1 v4.8h, v17.8h, v19.8h \n"
+ "trn2 v5.8h, v17.8h, v19.8h \n"
+ "trn1 v6.8h, v21.8h, v23.8h \n"
+ "trn2 v7.8h, v21.8h, v23.8h \n"
+
+ "trn1 v16.4s, v0.4s, v2.4s \n"
+ "trn2 v17.4s, v0.4s, v2.4s \n"
+ "trn1 v18.4s, v1.4s, v3.4s \n"
+ "trn2 v19.4s, v1.4s, v3.4s \n"
+ "trn1 v20.4s, v4.4s, v6.4s \n"
+ "trn2 v21.4s, v4.4s, v6.4s \n"
+ "trn1 v22.4s, v5.4s, v7.4s \n"
+ "trn2 v23.4s, v5.4s, v7.4s \n"
+
+ "mov %0, %2 \n"
+
+ MEMACCESS(0)
+ "st1 {v16.d}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v18.d}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v17.d}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v19.d}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v16.d}[1], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v18.d}[1], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v17.d}[1], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v19.d}[1], [%0] \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(0)
+ "st1 {v20.d}[0], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v22.d}[0], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v21.d}[0], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v23.d}[0], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v20.d}[1], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v22.d}[1], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v21.d}[1], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v23.d}[1], [%0] \n"
+
+ "add %1, %1, #16 \n" // src += 8*2
+ "add %2, %2, %6, lsl #3 \n" // dst_a += 8 * dst_stride_a
+ "add %3, %3, %7, lsl #3 \n" // dst_b += 8 * dst_stride_b
+ "subs %4, %4, #8 \n" // w -= 8
+ "b.ge 1b \n"
+
+ // add 8 back to counter. if the result is 0 there are
+ // no residuals.
+ "adds %4, %4, #8 \n"
+ "b.eq 4f \n"
+
+ // some residual, so between 1 and 7 lines left to transpose
+ "cmp %4, #2 \n"
+ "b.lt 3f \n"
+
+ "cmp %4, #4 \n"
+ "b.lt 2f \n"
+
+ // TODO(frkoenig): Clean this up
+ // 4x8 block
+ "mov %0, %1 \n"
+ MEMACCESS(0)
+ "ld1 {v0.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v1.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v2.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v3.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v4.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v5.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v6.8b}, [%0], %5 \n"
+ MEMACCESS(0)
+ "ld1 {v7.8b}, [%0] \n"
+
+ MEMACCESS(8)
+ "ld1 {v30.16b}, [%8], #16 \n"
+ "ld1 {v31.16b}, [%8] \n"
+
+ "tbl v16.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v30.16b \n"
+ "tbl v17.16b, {v0.16b, v1.16b, v2.16b, v3.16b}, v31.16b \n"
+ "tbl v18.16b, {v4.16b, v5.16b, v6.16b, v7.16b}, v30.16b \n"
+ "tbl v19.16b, {v4.16b, v5.16b, v6.16b, v7.16b}, v31.16b \n"
+
+ "mov %0, %2 \n"
+
+ MEMACCESS(0)
+ "st1 {v16.s}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v16.s}[1], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v16.s}[2], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v16.s}[3], [%0], %6 \n"
+
+ "add %0, %2, #4 \n"
+ MEMACCESS(0)
+ "st1 {v18.s}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v18.s}[1], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v18.s}[2], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v18.s}[3], [%0] \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(0)
+ "st1 {v17.s}[0], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v17.s}[1], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v17.s}[2], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v17.s}[3], [%0], %7 \n"
+
+ "add %0, %3, #4 \n"
+ MEMACCESS(0)
+ "st1 {v19.s}[0], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v19.s}[1], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v19.s}[2], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v19.s}[3], [%0] \n"
+
+ "add %1, %1, #8 \n" // src += 4 * 2
+ "add %2, %2, %6, lsl #2 \n" // dst_a += 4 * dst_stride_a
+ "add %3, %3, %7, lsl #2 \n" // dst_b += 4 * dst_stride_b
+ "subs %4, %4, #4 \n" // w -= 4
+ "b.eq 4f \n"
+
+ // some residual, check to see if it includes a 2x8 block,
+ // or less
+ "cmp %4, #2 \n"
+ "b.lt 3f \n"
+
+ // 2x8 block
+ "2: \n"
+ "mov %0, %1 \n"
+ MEMACCESS(0)
+ "ld2 {v0.h, v1.h}[0], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld2 {v2.h, v3.h}[0], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld2 {v0.h, v1.h}[1], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld2 {v2.h, v3.h}[1], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld2 {v0.h, v1.h}[2], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld2 {v2.h, v3.h}[2], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld2 {v0.h, v1.h}[3], [%0], %5 \n"
+ MEMACCESS(0)
+ "ld2 {v2.h, v3.h}[3], [%0] \n"
+
+ "trn1 v4.8b, v0.8b, v2.8b \n"
+ "trn2 v5.8b, v0.8b, v2.8b \n"
+ "trn1 v6.8b, v1.8b, v3.8b \n"
+ "trn2 v7.8b, v1.8b, v3.8b \n"
+
+ "mov %0, %2 \n"
+
+ MEMACCESS(0)
+ "st1 {v4.d}[0], [%0], %6 \n"
+ MEMACCESS(0)
+ "st1 {v6.d}[0], [%0] \n"
+
+ "mov %0, %3 \n"
+
+ MEMACCESS(0)
+ "st1 {v5.d}[0], [%0], %7 \n"
+ MEMACCESS(0)
+ "st1 {v7.d}[0], [%0] \n"
+
+ "add %1, %1, #4 \n" // src += 2 * 2
+ "add %2, %2, %6, lsl #1 \n" // dst_a += 2 * dst_stride_a
+ "add %3, %3, %7, lsl #1 \n" // dst_b += 2 * dst_stride_b
+ "subs %4, %4, #2 \n" // w -= 2
+ "b.eq 4f \n"
+
+ // 1x8 block
+ "3: \n"
+ MEMACCESS(1)
+ "ld2 {v0.b, v1.b}[0], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld2 {v0.b, v1.b}[1], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld2 {v0.b, v1.b}[2], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld2 {v0.b, v1.b}[3], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld2 {v0.b, v1.b}[4], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld2 {v0.b, v1.b}[5], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld2 {v0.b, v1.b}[6], [%1], %5 \n"
+ MEMACCESS(1)
+ "ld2 {v0.b, v1.b}[7], [%1] \n"
+
+ MEMACCESS(2)
+ "st1 {v0.d}[0], [%2] \n"
+ MEMACCESS(3)
+ "st1 {v1.d}[0], [%3] \n"
+
+ "4: \n"
+
+ : "+r"(src_temp), // %0
+ "+r"(src), // %1
+ "+r"(dst_a), // %2
+ "+r"(dst_b), // %3
+ "+r"(width64) // %4
+ : "r"(static_cast<ptrdiff_t>(src_stride)), // %5
+ "r"(static_cast<ptrdiff_t>(dst_stride_a)), // %6
+ "r"(static_cast<ptrdiff_t>(dst_stride_b)), // %7
+ "r"(&kVTbl4x4TransposeDi) // %8
+ : "memory", "cc",
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",
+ "v30", "v31"
+ );
+}
+#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/rotate_win.cc b/media/libaom/src/third_party/libyuv/source/rotate_win.cc
new file mode 100644
index 000000000..2760066df
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/rotate_win.cc
@@ -0,0 +1,248 @@
+/*
+ * Copyright 2013 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+#include "libyuv/rotate_row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for Visual C x86.
+#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \
+ defined(_MSC_VER) && !defined(__clang__)
+
+__declspec(naked)
+void TransposeWx8_SSSE3(const uint8* src, int src_stride,
+ uint8* dst, int dst_stride, int width) {
+ __asm {
+ push edi
+ push esi
+ push ebp
+ mov eax, [esp + 12 + 4] // src
+ mov edi, [esp + 12 + 8] // src_stride
+ mov edx, [esp + 12 + 12] // dst
+ mov esi, [esp + 12 + 16] // dst_stride
+ mov ecx, [esp + 12 + 20] // width
+
+ // Read in the data from the source pointer.
+ // First round of bit swap.
+ align 4
+ convertloop:
+ movq xmm0, qword ptr [eax]
+ lea ebp, [eax + 8]
+ movq xmm1, qword ptr [eax + edi]
+ lea eax, [eax + 2 * edi]
+ punpcklbw xmm0, xmm1
+ movq xmm2, qword ptr [eax]
+ movdqa xmm1, xmm0
+ palignr xmm1, xmm1, 8
+ movq xmm3, qword ptr [eax + edi]
+ lea eax, [eax + 2 * edi]
+ punpcklbw xmm2, xmm3
+ movdqa xmm3, xmm2
+ movq xmm4, qword ptr [eax]
+ palignr xmm3, xmm3, 8
+ movq xmm5, qword ptr [eax + edi]
+ punpcklbw xmm4, xmm5
+ lea eax, [eax + 2 * edi]
+ movdqa xmm5, xmm4
+ movq xmm6, qword ptr [eax]
+ palignr xmm5, xmm5, 8
+ movq xmm7, qword ptr [eax + edi]
+ punpcklbw xmm6, xmm7
+ mov eax, ebp
+ movdqa xmm7, xmm6
+ palignr xmm7, xmm7, 8
+ // Second round of bit swap.
+ punpcklwd xmm0, xmm2
+ punpcklwd xmm1, xmm3
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ palignr xmm2, xmm2, 8
+ palignr xmm3, xmm3, 8
+ punpcklwd xmm4, xmm6
+ punpcklwd xmm5, xmm7
+ movdqa xmm6, xmm4
+ movdqa xmm7, xmm5
+ palignr xmm6, xmm6, 8
+ palignr xmm7, xmm7, 8
+ // Third round of bit swap.
+ // Write to the destination pointer.
+ punpckldq xmm0, xmm4
+ movq qword ptr [edx], xmm0
+ movdqa xmm4, xmm0
+ palignr xmm4, xmm4, 8
+ movq qword ptr [edx + esi], xmm4
+ lea edx, [edx + 2 * esi]
+ punpckldq xmm2, xmm6
+ movdqa xmm6, xmm2
+ palignr xmm6, xmm6, 8
+ movq qword ptr [edx], xmm2
+ punpckldq xmm1, xmm5
+ movq qword ptr [edx + esi], xmm6
+ lea edx, [edx + 2 * esi]
+ movdqa xmm5, xmm1
+ movq qword ptr [edx], xmm1
+ palignr xmm5, xmm5, 8
+ punpckldq xmm3, xmm7
+ movq qword ptr [edx + esi], xmm5
+ lea edx, [edx + 2 * esi]
+ movq qword ptr [edx], xmm3
+ movdqa xmm7, xmm3
+ palignr xmm7, xmm7, 8
+ sub ecx, 8
+ movq qword ptr [edx + esi], xmm7
+ lea edx, [edx + 2 * esi]
+ jg convertloop
+
+ pop ebp
+ pop esi
+ pop edi
+ ret
+ }
+}
+
+__declspec(naked)
+void TransposeUVWx8_SSE2(const uint8* src, int src_stride,
+ uint8* dst_a, int dst_stride_a,
+ uint8* dst_b, int dst_stride_b,
+ int w) {
+ __asm {
+ push ebx
+ push esi
+ push edi
+ push ebp
+ mov eax, [esp + 16 + 4] // src
+ mov edi, [esp + 16 + 8] // src_stride
+ mov edx, [esp + 16 + 12] // dst_a
+ mov esi, [esp + 16 + 16] // dst_stride_a
+ mov ebx, [esp + 16 + 20] // dst_b
+ mov ebp, [esp + 16 + 24] // dst_stride_b
+ mov ecx, esp
+ sub esp, 4 + 16
+ and esp, ~15
+ mov [esp + 16], ecx
+ mov ecx, [ecx + 16 + 28] // w
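+    // The block above reserves a 16-byte aligned scratch slot at [esp]
+    // (used later to spill xmm5/xmm6), saves the caller's esp at
+    // [esp + 16], and reads w relative to the saved esp since esp moved.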
+
+ align 4
+ convertloop:
+ // Read in the data from the source pointer.
+ // First round of bit swap.
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + edi]
+ lea eax, [eax + 2 * edi]
+ movdqa xmm7, xmm0 // use xmm7 as temp register.
+ punpcklbw xmm0, xmm1
+ punpckhbw xmm7, xmm1
+ movdqa xmm1, xmm7
+ movdqu xmm2, [eax]
+ movdqu xmm3, [eax + edi]
+ lea eax, [eax + 2 * edi]
+ movdqa xmm7, xmm2
+ punpcklbw xmm2, xmm3
+ punpckhbw xmm7, xmm3
+ movdqa xmm3, xmm7
+ movdqu xmm4, [eax]
+ movdqu xmm5, [eax + edi]
+ lea eax, [eax + 2 * edi]
+ movdqa xmm7, xmm4
+ punpcklbw xmm4, xmm5
+ punpckhbw xmm7, xmm5
+ movdqa xmm5, xmm7
+ movdqu xmm6, [eax]
+ movdqu xmm7, [eax + edi]
+ lea eax, [eax + 2 * edi]
+ movdqu [esp], xmm5 // backup xmm5
+ neg edi
+ movdqa xmm5, xmm6 // use xmm5 as temp register.
+ punpcklbw xmm6, xmm7
+ punpckhbw xmm5, xmm7
+ movdqa xmm7, xmm5
+ lea eax, [eax + 8 * edi + 16]
+ neg edi
+ // Second round of bit swap.
+ movdqa xmm5, xmm0
+ punpcklwd xmm0, xmm2
+ punpckhwd xmm5, xmm2
+ movdqa xmm2, xmm5
+ movdqa xmm5, xmm1
+ punpcklwd xmm1, xmm3
+ punpckhwd xmm5, xmm3
+ movdqa xmm3, xmm5
+ movdqa xmm5, xmm4
+ punpcklwd xmm4, xmm6
+ punpckhwd xmm5, xmm6
+ movdqa xmm6, xmm5
+ movdqu xmm5, [esp] // restore xmm5
+ movdqu [esp], xmm6 // backup xmm6
+ movdqa xmm6, xmm5 // use xmm6 as temp register.
+ punpcklwd xmm5, xmm7
+ punpckhwd xmm6, xmm7
+ movdqa xmm7, xmm6
+ // Third round of bit swap.
+ // Write to the destination pointer.
+ movdqa xmm6, xmm0
+ punpckldq xmm0, xmm4
+ punpckhdq xmm6, xmm4
+ movdqa xmm4, xmm6
+ movdqu xmm6, [esp] // restore xmm6
+ movlpd qword ptr [edx], xmm0
+ movhpd qword ptr [ebx], xmm0
+ movlpd qword ptr [edx + esi], xmm4
+ lea edx, [edx + 2 * esi]
+ movhpd qword ptr [ebx + ebp], xmm4
+ lea ebx, [ebx + 2 * ebp]
+ movdqa xmm0, xmm2 // use xmm0 as the temp register.
+ punpckldq xmm2, xmm6
+ movlpd qword ptr [edx], xmm2
+ movhpd qword ptr [ebx], xmm2
+ punpckhdq xmm0, xmm6
+ movlpd qword ptr [edx + esi], xmm0
+ lea edx, [edx + 2 * esi]
+ movhpd qword ptr [ebx + ebp], xmm0
+ lea ebx, [ebx + 2 * ebp]
+ movdqa xmm0, xmm1 // use xmm0 as the temp register.
+ punpckldq xmm1, xmm5
+ movlpd qword ptr [edx], xmm1
+ movhpd qword ptr [ebx], xmm1
+ punpckhdq xmm0, xmm5
+ movlpd qword ptr [edx + esi], xmm0
+ lea edx, [edx + 2 * esi]
+ movhpd qword ptr [ebx + ebp], xmm0
+ lea ebx, [ebx + 2 * ebp]
+ movdqa xmm0, xmm3 // use xmm0 as the temp register.
+ punpckldq xmm3, xmm7
+ movlpd qword ptr [edx], xmm3
+ movhpd qword ptr [ebx], xmm3
+ punpckhdq xmm0, xmm7
+ sub ecx, 8
+ movlpd qword ptr [edx + esi], xmm0
+ lea edx, [edx + 2 * esi]
+ movhpd qword ptr [ebx + ebp], xmm0
+ lea ebx, [ebx + 2 * ebp]
+ jg convertloop
+
+ mov esp, [esp + 16]
+ pop ebp
+ pop edi
+ pop esi
+ pop ebx
+ ret
+ }
+}
+
+#endif // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/row_any.cc b/media/libaom/src/third_party/libyuv/source/row_any.cc
new file mode 100644
index 000000000..1cb1f6b93
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/row_any.cc
@@ -0,0 +1,680 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#include <string.h> // For memset.
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Subsampled source needs to be increased by 1 if not even.
+#define SS(width, shift) (((width) + (1 << (shift)) - 1) >> (shift))
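+// For example SS(5, 1) = 3 and SS(4, 1) = 2: the subsampled extent is
+// rounded up, never truncated.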
+
+// Any 3 planes to 1.
+#define ANY31(NAMEANY, ANY_SIMD, UVSHIFT, DUVSHIFT, BPP, MASK) \
+ void NAMEANY(const uint8* y_buf, const uint8* u_buf, const uint8* v_buf, \
+ uint8* dst_ptr, int width) { \
+ SIMD_ALIGNED(uint8 temp[64 * 4]); \
+ memset(temp, 0, 64 * 3); /* for YUY2 and msan */ \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(y_buf, u_buf, v_buf, dst_ptr, n); \
+ } \
+ memcpy(temp, y_buf + n, r); \
+ memcpy(temp + 64, u_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \
+ memcpy(temp + 128, v_buf + (n >> UVSHIFT), SS(r, UVSHIFT)); \
+ ANY_SIMD(temp, temp + 64, temp + 128, temp + 192, MASK + 1); \
+ memcpy(dst_ptr + (n >> DUVSHIFT) * BPP, temp + 192, \
+ SS(r, DUVSHIFT) * BPP); \
+ }
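+// For example, with MASK = 7 and width = 100, n = 96 pixels are handled by
+// ANY_SIMD directly; the r = 4 leftovers are copied into the zeroed temp
+// rows, processed as one extra 8-pixel block, and only the 4 valid output
+// pixels are copied back out.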
+
+#ifdef HAS_I422TOARGBROW_SSSE3
+ANY31(I422ToARGBRow_Any_SSSE3, I422ToARGBRow_SSSE3, 1, 0, 4, 7)
+#endif
+#ifdef HAS_I444TOARGBROW_SSSE3
+ANY31(I444ToARGBRow_Any_SSSE3, I444ToARGBRow_SSSE3, 0, 0, 4, 7)
+ANY31(I411ToARGBRow_Any_SSSE3, I411ToARGBRow_SSSE3, 2, 0, 4, 7)
+ANY31(I422ToBGRARow_Any_SSSE3, I422ToBGRARow_SSSE3, 1, 0, 4, 7)
+ANY31(I422ToABGRRow_Any_SSSE3, I422ToABGRRow_SSSE3, 1, 0, 4, 7)
+ANY31(I422ToRGBARow_Any_SSSE3, I422ToRGBARow_SSSE3, 1, 0, 4, 7)
+ANY31(I422ToARGB4444Row_Any_SSSE3, I422ToARGB4444Row_SSSE3, 1, 0, 2, 7)
+ANY31(I422ToARGB1555Row_Any_SSSE3, I422ToARGB1555Row_SSSE3, 1, 0, 2, 7)
+ANY31(I422ToRGB565Row_Any_SSSE3, I422ToRGB565Row_SSSE3, 1, 0, 2, 7)
+ANY31(I422ToRGB24Row_Any_SSSE3, I422ToRGB24Row_SSSE3, 1, 0, 3, 7)
+ANY31(I422ToRAWRow_Any_SSSE3, I422ToRAWRow_SSSE3, 1, 0, 3, 7)
+ANY31(I422ToYUY2Row_Any_SSE2, I422ToYUY2Row_SSE2, 1, 1, 4, 15)
+ANY31(I422ToUYVYRow_Any_SSE2, I422ToUYVYRow_SSE2, 1, 1, 4, 15)
+#endif // HAS_I444TOARGBROW_SSSE3
+#ifdef HAS_I422TORGB24ROW_AVX2
+ANY31(I422ToRGB24Row_Any_AVX2, I422ToRGB24Row_AVX2, 1, 0, 3, 15)
+#endif
+#ifdef HAS_I422TORAWROW_AVX2
+ANY31(I422ToRAWRow_Any_AVX2, I422ToRAWRow_AVX2, 1, 0, 3, 15)
+#endif
+#ifdef HAS_J422TOARGBROW_SSSE3
+ANY31(J422ToARGBRow_Any_SSSE3, J422ToARGBRow_SSSE3, 1, 0, 4, 7)
+#endif
+#ifdef HAS_J422TOARGBROW_AVX2
+ANY31(J422ToARGBRow_Any_AVX2, J422ToARGBRow_AVX2, 1, 0, 4, 15)
+#endif
+#ifdef HAS_I422TOARGBROW_AVX2
+ANY31(I422ToARGBRow_Any_AVX2, I422ToARGBRow_AVX2, 1, 0, 4, 15)
+#endif
+#ifdef HAS_I422TOBGRAROW_AVX2
+ANY31(I422ToBGRARow_Any_AVX2, I422ToBGRARow_AVX2, 1, 0, 4, 15)
+#endif
+#ifdef HAS_I422TORGBAROW_AVX2
+ANY31(I422ToRGBARow_Any_AVX2, I422ToRGBARow_AVX2, 1, 0, 4, 15)
+#endif
+#ifdef HAS_I422TOABGRROW_AVX2
+ANY31(I422ToABGRRow_Any_AVX2, I422ToABGRRow_AVX2, 1, 0, 4, 15)
+#endif
+#ifdef HAS_I444TOARGBROW_AVX2
+ANY31(I444ToARGBRow_Any_AVX2, I444ToARGBRow_AVX2, 0, 0, 4, 15)
+#endif
+#ifdef HAS_I411TOARGBROW_AVX2
+ANY31(I411ToARGBRow_Any_AVX2, I411ToARGBRow_AVX2, 2, 0, 4, 15)
+#endif
+#ifdef HAS_I422TOARGB4444ROW_AVX2
+ANY31(I422ToARGB4444Row_Any_AVX2, I422ToARGB4444Row_AVX2, 1, 0, 2, 7)
+#endif
+#ifdef HAS_I422TOARGB1555ROW_AVX2
+ANY31(I422ToARGB1555Row_Any_AVX2, I422ToARGB1555Row_AVX2, 1, 0, 2, 7)
+#endif
+#ifdef HAS_I422TORGB565ROW_AVX2
+ANY31(I422ToRGB565Row_Any_AVX2, I422ToRGB565Row_AVX2, 1, 0, 2, 7)
+#endif
+#ifdef HAS_I422TOARGBROW_NEON
+ANY31(I444ToARGBRow_Any_NEON, I444ToARGBRow_NEON, 0, 0, 4, 7)
+ANY31(I422ToARGBRow_Any_NEON, I422ToARGBRow_NEON, 1, 0, 4, 7)
+ANY31(I411ToARGBRow_Any_NEON, I411ToARGBRow_NEON, 2, 0, 4, 7)
+ANY31(I422ToBGRARow_Any_NEON, I422ToBGRARow_NEON, 1, 0, 4, 7)
+ANY31(I422ToABGRRow_Any_NEON, I422ToABGRRow_NEON, 1, 0, 4, 7)
+ANY31(I422ToRGBARow_Any_NEON, I422ToRGBARow_NEON, 1, 0, 4, 7)
+ANY31(I422ToRGB24Row_Any_NEON, I422ToRGB24Row_NEON, 1, 0, 3, 7)
+ANY31(I422ToRAWRow_Any_NEON, I422ToRAWRow_NEON, 1, 0, 3, 7)
+ANY31(I422ToARGB4444Row_Any_NEON, I422ToARGB4444Row_NEON, 1, 0, 2, 7)
+ANY31(I422ToARGB1555Row_Any_NEON, I422ToARGB1555Row_NEON, 1, 0, 2, 7)
+ANY31(I422ToRGB565Row_Any_NEON, I422ToRGB565Row_NEON, 1, 0, 2, 7)
+#endif
+#ifdef HAS_I422TOYUY2ROW_NEON
+ANY31(I422ToYUY2Row_Any_NEON, I422ToYUY2Row_NEON, 1, 1, 4, 15)
+#endif
+#ifdef HAS_I422TOUYVYROW_NEON
+ANY31(I422ToUYVYRow_Any_NEON, I422ToUYVYRow_NEON, 1, 1, 4, 15)
+#endif
+#undef ANY31
+
+// Any 2 planes to 1.
+#define ANY21(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, SBPP2, BPP, MASK) \
+ void NAMEANY(const uint8* y_buf, const uint8* uv_buf, \
+ uint8* dst_ptr, int width) { \
+ SIMD_ALIGNED(uint8 temp[64 * 3]); \
+ memset(temp, 0, 64 * 2); /* for msan */ \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(y_buf, uv_buf, dst_ptr, n); \
+ } \
+ memcpy(temp, y_buf + n * SBPP, r * SBPP); \
+ memcpy(temp + 64, uv_buf + (n >> UVSHIFT) * SBPP2, \
+ SS(r, UVSHIFT) * SBPP2); \
+ ANY_SIMD(temp, temp + 64, temp + 128, MASK + 1); \
+ memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
+ }
+
+// Biplanar to RGB.
+#ifdef HAS_NV12TOARGBROW_SSSE3
+ANY21(NV12ToARGBRow_Any_SSSE3, NV12ToARGBRow_SSSE3, 1, 1, 2, 4, 7)
+ANY21(NV21ToARGBRow_Any_SSSE3, NV21ToARGBRow_SSSE3, 1, 1, 2, 4, 7)
+#endif
+#ifdef HAS_NV12TOARGBROW_AVX2
+ANY21(NV12ToARGBRow_Any_AVX2, NV12ToARGBRow_AVX2, 1, 1, 2, 4, 15)
+ANY21(NV21ToARGBRow_Any_AVX2, NV21ToARGBRow_AVX2, 1, 1, 2, 4, 15)
+#endif
+#ifdef HAS_NV12TOARGBROW_NEON
+ANY21(NV12ToARGBRow_Any_NEON, NV12ToARGBRow_NEON, 1, 1, 2, 4, 7)
+ANY21(NV21ToARGBRow_Any_NEON, NV21ToARGBRow_NEON, 1, 1, 2, 4, 7)
+#endif
+#ifdef HAS_NV12TORGB565ROW_SSSE3
+ANY21(NV12ToRGB565Row_Any_SSSE3, NV12ToRGB565Row_SSSE3, 1, 1, 2, 2, 7)
+ANY21(NV21ToRGB565Row_Any_SSSE3, NV21ToRGB565Row_SSSE3, 1, 1, 2, 2, 7)
+#endif
+#ifdef HAS_NV12TORGB565ROW_AVX2
+ANY21(NV12ToRGB565Row_Any_AVX2, NV12ToRGB565Row_AVX2, 1, 1, 2, 2, 15)
+ANY21(NV21ToRGB565Row_Any_AVX2, NV21ToRGB565Row_AVX2, 1, 1, 2, 2, 15)
+#endif
+#ifdef HAS_NV12TORGB565ROW_NEON
+ANY21(NV12ToRGB565Row_Any_NEON, NV12ToRGB565Row_NEON, 1, 1, 2, 2, 7)
+ANY21(NV21ToRGB565Row_Any_NEON, NV21ToRGB565Row_NEON, 1, 1, 2, 2, 7)
+#endif
+
+// Merge functions.
+#ifdef HAS_MERGEUVROW_SSE2
+ANY21(MergeUVRow_Any_SSE2, MergeUVRow_SSE2, 0, 1, 1, 2, 15)
+#endif
+#ifdef HAS_MERGEUVROW_AVX2
+ANY21(MergeUVRow_Any_AVX2, MergeUVRow_AVX2, 0, 1, 1, 2, 31)
+#endif
+#ifdef HAS_MERGEUVROW_NEON
+ANY21(MergeUVRow_Any_NEON, MergeUVRow_NEON, 0, 1, 1, 2, 15)
+#endif
+
+// Math functions.
+#ifdef HAS_ARGBMULTIPLYROW_SSE2
+ANY21(ARGBMultiplyRow_Any_SSE2, ARGBMultiplyRow_SSE2, 0, 4, 4, 4, 3)
+#endif
+#ifdef HAS_ARGBADDROW_SSE2
+ANY21(ARGBAddRow_Any_SSE2, ARGBAddRow_SSE2, 0, 4, 4, 4, 3)
+#endif
+#ifdef HAS_ARGBSUBTRACTROW_SSE2
+ANY21(ARGBSubtractRow_Any_SSE2, ARGBSubtractRow_SSE2, 0, 4, 4, 4, 3)
+#endif
+#ifdef HAS_ARGBMULTIPLYROW_AVX2
+ANY21(ARGBMultiplyRow_Any_AVX2, ARGBMultiplyRow_AVX2, 0, 4, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBADDROW_AVX2
+ANY21(ARGBAddRow_Any_AVX2, ARGBAddRow_AVX2, 0, 4, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBSUBTRACTROW_AVX2
+ANY21(ARGBSubtractRow_Any_AVX2, ARGBSubtractRow_AVX2, 0, 4, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBMULTIPLYROW_NEON
+ANY21(ARGBMultiplyRow_Any_NEON, ARGBMultiplyRow_NEON, 0, 4, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBADDROW_NEON
+ANY21(ARGBAddRow_Any_NEON, ARGBAddRow_NEON, 0, 4, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBSUBTRACTROW_NEON
+ANY21(ARGBSubtractRow_Any_NEON, ARGBSubtractRow_NEON, 0, 4, 4, 4, 7)
+#endif
+#ifdef HAS_SOBELROW_SSE2
+ANY21(SobelRow_Any_SSE2, SobelRow_SSE2, 0, 1, 1, 4, 15)
+#endif
+#ifdef HAS_SOBELROW_NEON
+ANY21(SobelRow_Any_NEON, SobelRow_NEON, 0, 1, 1, 4, 7)
+#endif
+#ifdef HAS_SOBELTOPLANEROW_SSE2
+ANY21(SobelToPlaneRow_Any_SSE2, SobelToPlaneRow_SSE2, 0, 1, 1, 1, 15)
+#endif
+#ifdef HAS_SOBELTOPLANEROW_NEON
+ANY21(SobelToPlaneRow_Any_NEON, SobelToPlaneRow_NEON, 0, 1, 1, 1, 15)
+#endif
+#ifdef HAS_SOBELXYROW_SSE2
+ANY21(SobelXYRow_Any_SSE2, SobelXYRow_SSE2, 0, 1, 1, 4, 15)
+#endif
+#ifdef HAS_SOBELXYROW_NEON
+ANY21(SobelXYRow_Any_NEON, SobelXYRow_NEON, 0, 1, 1, 4, 7)
+#endif
+#undef ANY21
+
+// Any 1 to 1.
+#define ANY11(NAMEANY, ANY_SIMD, UVSHIFT, SBPP, BPP, MASK) \
+ void NAMEANY(const uint8* src_ptr, uint8* dst_ptr, int width) { \
+ SIMD_ALIGNED(uint8 temp[128 * 2]); \
+ memset(temp, 0, 128); /* for YUY2 and msan */ \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(src_ptr, dst_ptr, n); \
+ } \
+ memcpy(temp, src_ptr + (n >> UVSHIFT) * SBPP, SS(r, UVSHIFT) * SBPP); \
+ ANY_SIMD(temp, temp + 128, MASK + 1); \
+ memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
+ }
+
+#ifdef HAS_COPYROW_AVX
+ANY11(CopyRow_Any_AVX, CopyRow_AVX, 0, 1, 1, 63)
+#endif
+#ifdef HAS_COPYROW_SSE2
+ANY11(CopyRow_Any_SSE2, CopyRow_SSE2, 0, 1, 1, 31)
+#endif
+#ifdef HAS_COPYROW_NEON
+ANY11(CopyRow_Any_NEON, CopyRow_NEON, 0, 1, 1, 31)
+#endif
+#if defined(HAS_ARGBTORGB24ROW_SSSE3)
+ANY11(ARGBToRGB24Row_Any_SSSE3, ARGBToRGB24Row_SSSE3, 0, 4, 3, 15)
+ANY11(ARGBToRAWRow_Any_SSSE3, ARGBToRAWRow_SSSE3, 0, 4, 3, 15)
+ANY11(ARGBToRGB565Row_Any_SSE2, ARGBToRGB565Row_SSE2, 0, 4, 2, 3)
+ANY11(ARGBToARGB1555Row_Any_SSE2, ARGBToARGB1555Row_SSE2, 0, 4, 2, 3)
+ANY11(ARGBToARGB4444Row_Any_SSE2, ARGBToARGB4444Row_SSE2, 0, 4, 2, 3)
+#endif
+#if defined(HAS_ARGBTOARGB4444ROW_AVX2)
+ANY11(ARGBToRGB565Row_Any_AVX2, ARGBToRGB565Row_AVX2, 0, 4, 2, 7)
+ANY11(ARGBToARGB1555Row_Any_AVX2, ARGBToARGB1555Row_AVX2, 0, 4, 2, 7)
+ANY11(ARGBToARGB4444Row_Any_AVX2, ARGBToARGB4444Row_AVX2, 0, 4, 2, 7)
+#endif
+#if defined(HAS_J400TOARGBROW_SSE2)
+ANY11(J400ToARGBRow_Any_SSE2, J400ToARGBRow_SSE2, 0, 1, 4, 7)
+#endif
+#if defined(HAS_J400TOARGBROW_AVX2)
+ANY11(J400ToARGBRow_Any_AVX2, J400ToARGBRow_AVX2, 0, 1, 4, 15)
+#endif
+#if defined(HAS_I400TOARGBROW_SSE2)
+ANY11(I400ToARGBRow_Any_SSE2, I400ToARGBRow_SSE2, 0, 1, 4, 7)
+#endif
+#if defined(HAS_I400TOARGBROW_AVX2)
+ANY11(I400ToARGBRow_Any_AVX2, I400ToARGBRow_AVX2, 0, 1, 4, 15)
+#endif
+#if defined(HAS_YUY2TOARGBROW_SSSE3)
+ANY11(YUY2ToARGBRow_Any_SSSE3, YUY2ToARGBRow_SSSE3, 1, 4, 4, 15)
+ANY11(UYVYToARGBRow_Any_SSSE3, UYVYToARGBRow_SSSE3, 1, 4, 4, 15)
+ANY11(RGB24ToARGBRow_Any_SSSE3, RGB24ToARGBRow_SSSE3, 0, 3, 4, 15)
+ANY11(RAWToARGBRow_Any_SSSE3, RAWToARGBRow_SSSE3, 0, 3, 4, 15)
+ANY11(RGB565ToARGBRow_Any_SSE2, RGB565ToARGBRow_SSE2, 0, 2, 4, 7)
+ANY11(ARGB1555ToARGBRow_Any_SSE2, ARGB1555ToARGBRow_SSE2, 0, 2, 4, 7)
+ANY11(ARGB4444ToARGBRow_Any_SSE2, ARGB4444ToARGBRow_SSE2, 0, 2, 4, 7)
+#endif
+#if defined(HAS_RGB565TOARGBROW_AVX2)
+ANY11(RGB565ToARGBRow_Any_AVX2, RGB565ToARGBRow_AVX2, 0, 2, 4, 15)
+#endif
+#if defined(HAS_ARGB1555TOARGBROW_AVX2)
+ANY11(ARGB1555ToARGBRow_Any_AVX2, ARGB1555ToARGBRow_AVX2, 0, 2, 4, 15)
+#endif
+#if defined(HAS_ARGB4444TOARGBROW_AVX2)
+ANY11(ARGB4444ToARGBRow_Any_AVX2, ARGB4444ToARGBRow_AVX2, 0, 2, 4, 15)
+#endif
+#if defined(HAS_YUY2TOARGBROW_AVX2)
+ANY11(YUY2ToARGBRow_Any_AVX2, YUY2ToARGBRow_AVX2, 1, 4, 4, 31)
+ANY11(UYVYToARGBRow_Any_AVX2, UYVYToARGBRow_AVX2, 1, 4, 4, 31)
+#endif
+#if defined(HAS_ARGBTORGB24ROW_NEON)
+ANY11(ARGBToRGB24Row_Any_NEON, ARGBToRGB24Row_NEON, 0, 4, 3, 7)
+ANY11(ARGBToRAWRow_Any_NEON, ARGBToRAWRow_NEON, 0, 4, 3, 7)
+ANY11(ARGBToRGB565Row_Any_NEON, ARGBToRGB565Row_NEON, 0, 4, 2, 7)
+ANY11(ARGBToARGB1555Row_Any_NEON, ARGBToARGB1555Row_NEON, 0, 4, 2, 7)
+ANY11(ARGBToARGB4444Row_Any_NEON, ARGBToARGB4444Row_NEON, 0, 4, 2, 7)
+ANY11(J400ToARGBRow_Any_NEON, J400ToARGBRow_NEON, 0, 1, 4, 7)
+ANY11(I400ToARGBRow_Any_NEON, I400ToARGBRow_NEON, 0, 1, 4, 7)
+ANY11(YUY2ToARGBRow_Any_NEON, YUY2ToARGBRow_NEON, 1, 4, 4, 7)
+ANY11(UYVYToARGBRow_Any_NEON, UYVYToARGBRow_NEON, 1, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBTOYROW_AVX2
+ANY11(ARGBToYRow_Any_AVX2, ARGBToYRow_AVX2, 0, 4, 1, 31)
+#endif
+#ifdef HAS_ARGBTOYJROW_AVX2
+ANY11(ARGBToYJRow_Any_AVX2, ARGBToYJRow_AVX2, 0, 4, 1, 31)
+#endif
+#ifdef HAS_UYVYTOYROW_AVX2
+ANY11(UYVYToYRow_Any_AVX2, UYVYToYRow_AVX2, 0, 2, 1, 31)
+#endif
+#ifdef HAS_YUY2TOYROW_AVX2
+ANY11(YUY2ToYRow_Any_AVX2, YUY2ToYRow_AVX2, 1, 4, 1, 31)
+#endif
+#ifdef HAS_ARGBTOYROW_SSSE3
+ANY11(ARGBToYRow_Any_SSSE3, ARGBToYRow_SSSE3, 0, 4, 1, 15)
+#endif
+#ifdef HAS_BGRATOYROW_SSSE3
+ANY11(BGRAToYRow_Any_SSSE3, BGRAToYRow_SSSE3, 0, 4, 1, 15)
+ANY11(ABGRToYRow_Any_SSSE3, ABGRToYRow_SSSE3, 0, 4, 1, 15)
+ANY11(RGBAToYRow_Any_SSSE3, RGBAToYRow_SSSE3, 0, 4, 1, 15)
+ANY11(YUY2ToYRow_Any_SSE2, YUY2ToYRow_SSE2, 1, 4, 1, 15)
+ANY11(UYVYToYRow_Any_SSE2, UYVYToYRow_SSE2, 1, 4, 1, 15)
+#endif
+#ifdef HAS_ARGBTOYJROW_SSSE3
+ANY11(ARGBToYJRow_Any_SSSE3, ARGBToYJRow_SSSE3, 0, 4, 1, 15)
+#endif
+#ifdef HAS_ARGBTOYROW_NEON
+ANY11(ARGBToYRow_Any_NEON, ARGBToYRow_NEON, 0, 4, 1, 7)
+#endif
+#ifdef HAS_ARGBTOYJROW_NEON
+ANY11(ARGBToYJRow_Any_NEON, ARGBToYJRow_NEON, 0, 4, 1, 7)
+#endif
+#ifdef HAS_BGRATOYROW_NEON
+ANY11(BGRAToYRow_Any_NEON, BGRAToYRow_NEON, 0, 4, 1, 7)
+#endif
+#ifdef HAS_ABGRTOYROW_NEON
+ANY11(ABGRToYRow_Any_NEON, ABGRToYRow_NEON, 0, 4, 1, 7)
+#endif
+#ifdef HAS_RGBATOYROW_NEON
+ANY11(RGBAToYRow_Any_NEON, RGBAToYRow_NEON, 0, 4, 1, 7)
+#endif
+#ifdef HAS_RGB24TOYROW_NEON
+ANY11(RGB24ToYRow_Any_NEON, RGB24ToYRow_NEON, 0, 3, 1, 7)
+#endif
+#ifdef HAS_RAWTOYROW_NEON
+ANY11(RAWToYRow_Any_NEON, RAWToYRow_NEON, 0, 3, 1, 7)
+#endif
+#ifdef HAS_RGB565TOYROW_NEON
+ANY11(RGB565ToYRow_Any_NEON, RGB565ToYRow_NEON, 0, 2, 1, 7)
+#endif
+#ifdef HAS_ARGB1555TOYROW_NEON
+ANY11(ARGB1555ToYRow_Any_NEON, ARGB1555ToYRow_NEON, 0, 2, 1, 7)
+#endif
+#ifdef HAS_ARGB4444TOYROW_NEON
+ANY11(ARGB4444ToYRow_Any_NEON, ARGB4444ToYRow_NEON, 0, 2, 1, 7)
+#endif
+#ifdef HAS_YUY2TOYROW_NEON
+ANY11(YUY2ToYRow_Any_NEON, YUY2ToYRow_NEON, 1, 4, 1, 15)
+#endif
+#ifdef HAS_UYVYTOYROW_NEON
+ANY11(UYVYToYRow_Any_NEON, UYVYToYRow_NEON, 0, 2, 1, 15)
+#endif
+#ifdef HAS_RGB24TOARGBROW_NEON
+ANY11(RGB24ToARGBRow_Any_NEON, RGB24ToARGBRow_NEON, 0, 3, 4, 7)
+#endif
+#ifdef HAS_RAWTOARGBROW_NEON
+ANY11(RAWToARGBRow_Any_NEON, RAWToARGBRow_NEON, 0, 3, 4, 7)
+#endif
+#ifdef HAS_RGB565TOARGBROW_NEON
+ANY11(RGB565ToARGBRow_Any_NEON, RGB565ToARGBRow_NEON, 0, 2, 4, 7)
+#endif
+#ifdef HAS_ARGB1555TOARGBROW_NEON
+ANY11(ARGB1555ToARGBRow_Any_NEON, ARGB1555ToARGBRow_NEON, 0, 2, 4, 7)
+#endif
+#ifdef HAS_ARGB4444TOARGBROW_NEON
+ANY11(ARGB4444ToARGBRow_Any_NEON, ARGB4444ToARGBRow_NEON, 0, 2, 4, 7)
+#endif
+#ifdef HAS_ARGBATTENUATEROW_SSSE3
+ANY11(ARGBAttenuateRow_Any_SSSE3, ARGBAttenuateRow_SSSE3, 0, 4, 4, 3)
+#endif
+#ifdef HAS_ARGBATTENUATEROW_SSE2
+ANY11(ARGBAttenuateRow_Any_SSE2, ARGBAttenuateRow_SSE2, 0, 4, 4, 3)
+#endif
+#ifdef HAS_ARGBUNATTENUATEROW_SSE2
+ANY11(ARGBUnattenuateRow_Any_SSE2, ARGBUnattenuateRow_SSE2, 0, 4, 4, 3)
+#endif
+#ifdef HAS_ARGBATTENUATEROW_AVX2
+ANY11(ARGBAttenuateRow_Any_AVX2, ARGBAttenuateRow_AVX2, 0, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBUNATTENUATEROW_AVX2
+ANY11(ARGBUnattenuateRow_Any_AVX2, ARGBUnattenuateRow_AVX2, 0, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBATTENUATEROW_NEON
+ANY11(ARGBAttenuateRow_Any_NEON, ARGBAttenuateRow_NEON, 0, 4, 4, 7)
+#endif
+#undef ANY11
+
+// Any 1 to 1 with parameter.
+#define ANY11P(NAMEANY, ANY_SIMD, T, SBPP, BPP, MASK) \
+ void NAMEANY(const uint8* src_ptr, uint8* dst_ptr, \
+ T shuffler, int width) { \
+ SIMD_ALIGNED(uint8 temp[64 * 2]); \
+ memset(temp, 0, 64); /* for msan */ \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(src_ptr, dst_ptr, shuffler, n); \
+ } \
+ memcpy(temp, src_ptr + n * SBPP, r * SBPP); \
+ ANY_SIMD(temp, temp + 64, shuffler, MASK + 1); \
+ memcpy(dst_ptr + n * BPP, temp + 64, r * BPP); \
+ }
+
+#if defined(HAS_ARGBTORGB565DITHERROW_SSE2)
+ANY11P(ARGBToRGB565DitherRow_Any_SSE2, ARGBToRGB565DitherRow_SSE2,
+ const uint32, 4, 2, 3)
+#endif
+#if defined(HAS_ARGBTORGB565DITHERROW_AVX2)
+ANY11P(ARGBToRGB565DitherRow_Any_AVX2, ARGBToRGB565DitherRow_AVX2,
+ const uint32, 4, 2, 7)
+#endif
+#if defined(HAS_ARGBTORGB565DITHERROW_NEON)
+ANY11P(ARGBToRGB565DitherRow_Any_NEON, ARGBToRGB565DitherRow_NEON,
+ const uint32, 4, 2, 7)
+#endif
+#ifdef HAS_ARGBSHUFFLEROW_SSE2
+ANY11P(ARGBShuffleRow_Any_SSE2, ARGBShuffleRow_SSE2, const uint8*, 4, 4, 3)
+#endif
+#ifdef HAS_ARGBSHUFFLEROW_SSSE3
+ANY11P(ARGBShuffleRow_Any_SSSE3, ARGBShuffleRow_SSSE3, const uint8*, 4, 4, 7)
+#endif
+#ifdef HAS_ARGBSHUFFLEROW_AVX2
+ANY11P(ARGBShuffleRow_Any_AVX2, ARGBShuffleRow_AVX2, const uint8*, 4, 4, 15)
+#endif
+#ifdef HAS_ARGBSHUFFLEROW_NEON
+ANY11P(ARGBShuffleRow_Any_NEON, ARGBShuffleRow_NEON, const uint8*, 4, 4, 3)
+#endif
+#undef ANY11P
+
+// Any 1 to 1 interpolate. Takes 2 rows of source via stride.
+#define ANY11T(NAMEANY, ANY_SIMD, SBPP, BPP, MASK) \
+ void NAMEANY(uint8* dst_ptr, const uint8* src_ptr, \
+ ptrdiff_t src_stride_ptr, int width, \
+ int source_y_fraction) { \
+ SIMD_ALIGNED(uint8 temp[64 * 3]); \
+ memset(temp, 0, 64 * 2); /* for msan */ \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(dst_ptr, src_ptr, src_stride_ptr, n, source_y_fraction); \
+ } \
+ memcpy(temp, src_ptr + n * SBPP, r * SBPP); \
+ memcpy(temp + 64, src_ptr + src_stride_ptr + n * SBPP, r * SBPP); \
+ ANY_SIMD(temp + 128, temp, 64, MASK + 1, source_y_fraction); \
+ memcpy(dst_ptr + n * BPP, temp + 128, r * BPP); \
+ }
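+// Note the literal 64 passed as the stride: the two source rows were copied
+// to temp and temp + 64, so the SIMD kernel finds its second row at that
+// fixed offset inside the scratch buffer.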
+
+#ifdef HAS_INTERPOLATEROW_AVX2
+ANY11T(InterpolateRow_Any_AVX2, InterpolateRow_AVX2, 1, 1, 31)
+#endif
+#ifdef HAS_INTERPOLATEROW_SSSE3
+ANY11T(InterpolateRow_Any_SSSE3, InterpolateRow_SSSE3, 1, 1, 15)
+#endif
+#ifdef HAS_INTERPOLATEROW_SSE2
+ANY11T(InterpolateRow_Any_SSE2, InterpolateRow_SSE2, 1, 1, 15)
+#endif
+#ifdef HAS_INTERPOLATEROW_NEON
+ANY11T(InterpolateRow_Any_NEON, InterpolateRow_NEON, 1, 1, 15)
+#endif
+#ifdef HAS_INTERPOLATEROW_MIPS_DSPR2
+ANY11T(InterpolateRow_Any_MIPS_DSPR2, InterpolateRow_MIPS_DSPR2, 1, 1, 3)
+#endif
+#undef ANY11T
+
+// Any 1 to 1 mirror.
+#define ANY11M(NAMEANY, ANY_SIMD, BPP, MASK) \
+ void NAMEANY(const uint8* src_ptr, uint8* dst_ptr, int width) { \
+ SIMD_ALIGNED(uint8 temp[64 * 2]); \
+ memset(temp, 0, 64); /* for msan */ \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(src_ptr + r * BPP, dst_ptr, n); \
+ } \
+ memcpy(temp, src_ptr, r * BPP); \
+ ANY_SIMD(temp, temp + 64, MASK + 1); \
+ memcpy(dst_ptr + n * BPP, temp + 64 + (MASK + 1 - r) * BPP, r * BPP); \
+ }
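+// The mirror kernel always writes MASK + 1 reversed pixels to temp + 64.
+// Only the first r input pixels are valid, so the usable output is the tail
+// of that block, starting at offset (MASK + 1 - r) * BPP.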
+
+#ifdef HAS_MIRRORROW_AVX2
+ANY11M(MirrorRow_Any_AVX2, MirrorRow_AVX2, 1, 31)
+#endif
+#ifdef HAS_MIRRORROW_SSSE3
+ANY11M(MirrorRow_Any_SSSE3, MirrorRow_SSSE3, 1, 15)
+#endif
+#ifdef HAS_MIRRORROW_SSE2
+ANY11M(MirrorRow_Any_SSE2, MirrorRow_SSE2, 1, 15)
+#endif
+#ifdef HAS_MIRRORROW_NEON
+ANY11M(MirrorRow_Any_NEON, MirrorRow_NEON, 1, 15)
+#endif
+#ifdef HAS_ARGBMIRRORROW_AVX2
+ANY11M(ARGBMirrorRow_Any_AVX2, ARGBMirrorRow_AVX2, 4, 7)
+#endif
+#ifdef HAS_ARGBMIRRORROW_SSE2
+ANY11M(ARGBMirrorRow_Any_SSE2, ARGBMirrorRow_SSE2, 4, 3)
+#endif
+#ifdef HAS_ARGBMIRRORROW_NEON
+ANY11M(ARGBMirrorRow_Any_NEON, ARGBMirrorRow_NEON, 4, 3)
+#endif
+#undef ANY11M
+
+// Any 1 plane. (memset)
+#define ANY1(NAMEANY, ANY_SIMD, T, BPP, MASK) \
+ void NAMEANY(uint8* dst_ptr, T v32, int width) { \
+ SIMD_ALIGNED(uint8 temp[64]); \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(dst_ptr, v32, n); \
+ } \
+ ANY_SIMD(temp, v32, MASK + 1); \
+ memcpy(dst_ptr + n * BPP, temp, r * BPP); \
+ }
+
+#ifdef HAS_SETROW_X86
+ANY1(SetRow_Any_X86, SetRow_X86, uint8, 1, 3)
+#endif
+#ifdef HAS_SETROW_NEON
+ANY1(SetRow_Any_NEON, SetRow_NEON, uint8, 1, 15)
+#endif
+#ifdef HAS_ARGBSETROW_NEON
+ANY1(ARGBSetRow_Any_NEON, ARGBSetRow_NEON, uint32, 4, 3)
+#endif
+#undef ANY1
+
+// Any 1 to 2. Outputs UV planes.
+#define ANY12(NAMEANY, ANY_SIMD, UVSHIFT, BPP, DUVSHIFT, MASK) \
+ void NAMEANY(const uint8* src_ptr, uint8* dst_u, uint8* dst_v, int width) {\
+ SIMD_ALIGNED(uint8 temp[128 * 3]); \
+ memset(temp, 0, 128); /* for msan */ \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(src_ptr, dst_u, dst_v, n); \
+ } \
+ memcpy(temp, src_ptr + (n >> UVSHIFT) * BPP, SS(r, UVSHIFT) * BPP); \
+ if ((width & 1) && BPP == 4) { /* repeat last 4 bytes for subsampler */ \
+ memcpy(temp + SS(r, UVSHIFT) * BPP, \
+ temp + SS(r, UVSHIFT) * BPP - BPP, 4); \
+ } \
+ ANY_SIMD(temp, temp + 128, temp + 256, MASK + 1); \
+ memcpy(dst_u + (n >> DUVSHIFT), temp + 128, SS(r, DUVSHIFT)); \
+ memcpy(dst_v + (n >> DUVSHIFT), temp + 256, SS(r, DUVSHIFT)); \
+ }
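+// With BPP == 4 (ARGB) and an odd width, the 2x horizontal subsampler would
+// otherwise average in a pixel past the valid data, so the last valid pixel
+// is duplicated to keep the final average correct.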
+
+#ifdef HAS_SPLITUVROW_SSE2
+ANY12(SplitUVRow_Any_SSE2, SplitUVRow_SSE2, 0, 2, 0, 15)
+#endif
+#ifdef HAS_SPLITUVROW_AVX2
+ANY12(SplitUVRow_Any_AVX2, SplitUVRow_AVX2, 0, 2, 0, 31)
+#endif
+#ifdef HAS_SPLITUVROW_NEON
+ANY12(SplitUVRow_Any_NEON, SplitUVRow_NEON, 0, 2, 0, 15)
+#endif
+#ifdef HAS_SPLITUVROW_MIPS_DSPR2
+ANY12(SplitUVRow_Any_MIPS_DSPR2, SplitUVRow_MIPS_DSPR2, 0, 2, 0, 15)
+#endif
+#ifdef HAS_ARGBTOUV444ROW_SSSE3
+ANY12(ARGBToUV444Row_Any_SSSE3, ARGBToUV444Row_SSSE3, 0, 4, 0, 15)
+#endif
+#ifdef HAS_YUY2TOUV422ROW_AVX2
+ANY12(YUY2ToUV422Row_Any_AVX2, YUY2ToUV422Row_AVX2, 1, 4, 1, 31)
+ANY12(UYVYToUV422Row_Any_AVX2, UYVYToUV422Row_AVX2, 1, 4, 1, 31)
+#endif
+#ifdef HAS_ARGBTOUV422ROW_SSSE3
+ANY12(ARGBToUV422Row_Any_SSSE3, ARGBToUV422Row_SSSE3, 0, 4, 1, 15)
+#endif
+#ifdef HAS_YUY2TOUV422ROW_SSE2
+ANY12(YUY2ToUV422Row_Any_SSE2, YUY2ToUV422Row_SSE2, 1, 4, 1, 15)
+ANY12(UYVYToUV422Row_Any_SSE2, UYVYToUV422Row_SSE2, 1, 4, 1, 15)
+#endif
+#ifdef HAS_YUY2TOUV422ROW_NEON
+ANY12(ARGBToUV444Row_Any_NEON, ARGBToUV444Row_NEON, 0, 4, 0, 7)
+ANY12(ARGBToUV422Row_Any_NEON, ARGBToUV422Row_NEON, 0, 4, 1, 15)
+ANY12(ARGBToUV411Row_Any_NEON, ARGBToUV411Row_NEON, 0, 4, 2, 31)
+ANY12(YUY2ToUV422Row_Any_NEON, YUY2ToUV422Row_NEON, 1, 4, 1, 15)
+ANY12(UYVYToUV422Row_Any_NEON, UYVYToUV422Row_NEON, 1, 4, 1, 15)
+#endif
+#undef ANY12
+
+// Any 1 to 2 with source stride (2 rows of source). Outputs UV planes.
+// 128 byte row allows for 32 AVX ARGB pixels.
+#define ANY12S(NAMEANY, ANY_SIMD, UVSHIFT, BPP, MASK) \
+ void NAMEANY(const uint8* src_ptr, int src_stride_ptr, \
+ uint8* dst_u, uint8* dst_v, int width) { \
+ SIMD_ALIGNED(uint8 temp[128 * 4]); \
+ memset(temp, 0, 128 * 2); /* for msan */ \
+ int r = width & MASK; \
+ int n = width & ~MASK; \
+ if (n > 0) { \
+ ANY_SIMD(src_ptr, src_stride_ptr, dst_u, dst_v, n); \
+ } \
+ memcpy(temp, src_ptr + (n >> UVSHIFT) * BPP, SS(r, UVSHIFT) * BPP); \
+ memcpy(temp + 128, src_ptr + src_stride_ptr + (n >> UVSHIFT) * BPP, \
+ SS(r, UVSHIFT) * BPP); \
+ if ((width & 1) && BPP == 4) { /* repeat last 4 bytes for subsampler */ \
+ memcpy(temp + SS(r, UVSHIFT) * BPP, \
+ temp + SS(r, UVSHIFT) * BPP - BPP, 4); \
+ memcpy(temp + 128 + SS(r, UVSHIFT) * BPP, \
+ temp + 128 + SS(r, UVSHIFT) * BPP - BPP, 4); \
+ } \
+ ANY_SIMD(temp, 128, temp + 256, temp + 384, MASK + 1); \
+ memcpy(dst_u + (n >> 1), temp + 256, SS(r, 1)); \
+ memcpy(dst_v + (n >> 1), temp + 384, SS(r, 1)); \
+ }
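+// As in the interpolate wrapper, the constant 128 passed as src_stride_ptr
+// points the kernel at its second source row, which was copied to temp + 128.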
+
+#ifdef HAS_ARGBTOUVROW_AVX2
+ANY12S(ARGBToUVRow_Any_AVX2, ARGBToUVRow_AVX2, 0, 4, 31)
+#endif
+#ifdef HAS_ARGBTOUVROW_SSSE3
+ANY12S(ARGBToUVRow_Any_SSSE3, ARGBToUVRow_SSSE3, 0, 4, 15)
+ANY12S(ARGBToUVJRow_Any_SSSE3, ARGBToUVJRow_SSSE3, 0, 4, 15)
+ANY12S(BGRAToUVRow_Any_SSSE3, BGRAToUVRow_SSSE3, 0, 4, 15)
+ANY12S(ABGRToUVRow_Any_SSSE3, ABGRToUVRow_SSSE3, 0, 4, 15)
+ANY12S(RGBAToUVRow_Any_SSSE3, RGBAToUVRow_SSSE3, 0, 4, 15)
+#endif
+#ifdef HAS_YUY2TOUVROW_AVX2
+ANY12S(YUY2ToUVRow_Any_AVX2, YUY2ToUVRow_AVX2, 1, 4, 31)
+ANY12S(UYVYToUVRow_Any_AVX2, UYVYToUVRow_AVX2, 1, 4, 31)
+#endif
+#ifdef HAS_YUY2TOUVROW_SSE2
+ANY12S(YUY2ToUVRow_Any_SSE2, YUY2ToUVRow_SSE2, 1, 4, 15)
+ANY12S(UYVYToUVRow_Any_SSE2, UYVYToUVRow_SSE2, 1, 4, 15)
+#endif
+#ifdef HAS_ARGBTOUVROW_NEON
+ANY12S(ARGBToUVRow_Any_NEON, ARGBToUVRow_NEON, 0, 4, 15)
+#endif
+#ifdef HAS_ARGBTOUVJROW_NEON
+ANY12S(ARGBToUVJRow_Any_NEON, ARGBToUVJRow_NEON, 0, 4, 15)
+#endif
+#ifdef HAS_BGRATOUVROW_NEON
+ANY12S(BGRAToUVRow_Any_NEON, BGRAToUVRow_NEON, 0, 4, 15)
+#endif
+#ifdef HAS_ABGRTOUVROW_NEON
+ANY12S(ABGRToUVRow_Any_NEON, ABGRToUVRow_NEON, 0, 4, 15)
+#endif
+#ifdef HAS_RGBATOUVROW_NEON
+ANY12S(RGBAToUVRow_Any_NEON, RGBAToUVRow_NEON, 0, 4, 15)
+#endif
+#ifdef HAS_RGB24TOUVROW_NEON
+ANY12S(RGB24ToUVRow_Any_NEON, RGB24ToUVRow_NEON, 0, 3, 15)
+#endif
+#ifdef HAS_RAWTOUVROW_NEON
+ANY12S(RAWToUVRow_Any_NEON, RAWToUVRow_NEON, 0, 3, 15)
+#endif
+#ifdef HAS_RGB565TOUVROW_NEON
+ANY12S(RGB565ToUVRow_Any_NEON, RGB565ToUVRow_NEON, 0, 2, 15)
+#endif
+#ifdef HAS_ARGB1555TOUVROW_NEON
+ANY12S(ARGB1555ToUVRow_Any_NEON, ARGB1555ToUVRow_NEON, 0, 2, 15)
+#endif
+#ifdef HAS_ARGB4444TOUVROW_NEON
+ANY12S(ARGB4444ToUVRow_Any_NEON, ARGB4444ToUVRow_NEON, 0, 2, 15)
+#endif
+#ifdef HAS_YUY2TOUVROW_NEON
+ANY12S(YUY2ToUVRow_Any_NEON, YUY2ToUVRow_NEON, 1, 4, 15)
+#endif
+#ifdef HAS_UYVYTOUVROW_NEON
+ANY12S(UYVYToUVRow_Any_NEON, UYVYToUVRow_NEON, 1, 4, 15)
+#endif
+#undef ANY12S
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/row_common.cc b/media/libaom/src/third_party/libyuv/source/row_common.cc
new file mode 100644
index 000000000..49875894f
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/row_common.cc
@@ -0,0 +1,2576 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#include <string.h> // For memcpy and memset.
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// llvm x86 is poor at the ternary operator, so use branchless min/max.
+
+#define USE_BRANCHLESS 1
+#if USE_BRANCHLESS
+static __inline int32 clamp0(int32 v) {
+ return ((-(v) >> 31) & (v));
+}
+
+static __inline int32 clamp255(int32 v) {
+ return (((255 - (v)) >> 31) | (v)) & 255;
+}
+
+static __inline uint32 Clamp(int32 val) {
+ int v = clamp0(val);
+ return (uint32)(clamp255(v));
+}
+
+static __inline uint32 Abs(int32 v) {
+ int m = v >> 31;
+ return (v + m) ^ m;
+}
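+// These helpers assume an arithmetic (sign-extending) right shift for
+// negative values. Worked examples:
+//   clamp0(-5):    (5 >> 31) & -5 = 0 & -5 = 0
+//   clamp255(300): (((255 - 300) >> 31) | 300) & 255 = (-1 | 300) & 255 = 255
+//   Abs(-5):       m = -1; (-5 + -1) ^ -1 = -6 ^ -1 = 5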
+#else // USE_BRANCHLESS
+static __inline int32 clamp0(int32 v) {
+ return (v < 0) ? 0 : v;
+}
+
+static __inline int32 clamp255(int32 v) {
+ return (v > 255) ? 255 : v;
+}
+
+static __inline uint32 Clamp(int32 val) {
+ int v = clamp0(val);
+ return (uint32)(clamp255(v));
+}
+
+static __inline uint32 Abs(int32 v) {
+ return (v < 0) ? -v : v;
+}
+#endif // USE_BRANCHLESS
+
+#ifdef LIBYUV_LITTLE_ENDIAN
+#define WRITEWORD(p, v) *(uint32*)(p) = v
+#else
+static inline void WRITEWORD(uint8* p, uint32 v) {
+ p[0] = (uint8)(v & 255);
+ p[1] = (uint8)((v >> 8) & 255);
+ p[2] = (uint8)((v >> 16) & 255);
+ p[3] = (uint8)((v >> 24) & 255);
+}
+#endif
+
+void RGB24ToARGBRow_C(const uint8* src_rgb24, uint8* dst_argb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_rgb24[0];
+ uint8 g = src_rgb24[1];
+ uint8 r = src_rgb24[2];
+ dst_argb[0] = b;
+ dst_argb[1] = g;
+ dst_argb[2] = r;
+ dst_argb[3] = 255u;
+ dst_argb += 4;
+ src_rgb24 += 3;
+ }
+}
+
+void RAWToARGBRow_C(const uint8* src_raw, uint8* dst_argb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 r = src_raw[0];
+ uint8 g = src_raw[1];
+ uint8 b = src_raw[2];
+ dst_argb[0] = b;
+ dst_argb[1] = g;
+ dst_argb[2] = r;
+ dst_argb[3] = 255u;
+ dst_argb += 4;
+ src_raw += 3;
+ }
+}
+
+void RGB565ToARGBRow_C(const uint8* src_rgb565, uint8* dst_argb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_rgb565[0] & 0x1f;
+ uint8 g = (src_rgb565[0] >> 5) | ((src_rgb565[1] & 0x07) << 3);
+ uint8 r = src_rgb565[1] >> 3;
+ dst_argb[0] = (b << 3) | (b >> 2);
+ dst_argb[1] = (g << 2) | (g >> 4);
+ dst_argb[2] = (r << 3) | (r >> 2);
+ dst_argb[3] = 255u;
+ dst_argb += 4;
+ src_rgb565 += 2;
+ }
+}
+
+void ARGB1555ToARGBRow_C(const uint8* src_argb1555, uint8* dst_argb,
+ int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_argb1555[0] & 0x1f;
+ uint8 g = (src_argb1555[0] >> 5) | ((src_argb1555[1] & 0x03) << 3);
+ uint8 r = (src_argb1555[1] & 0x7c) >> 2;
+ uint8 a = src_argb1555[1] >> 7;
+ dst_argb[0] = (b << 3) | (b >> 2);
+ dst_argb[1] = (g << 3) | (g >> 2);
+ dst_argb[2] = (r << 3) | (r >> 2);
+ dst_argb[3] = -a;
+ dst_argb += 4;
+ src_argb1555 += 2;
+ }
+}
+
+void ARGB4444ToARGBRow_C(const uint8* src_argb4444, uint8* dst_argb,
+ int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_argb4444[0] & 0x0f;
+ uint8 g = src_argb4444[0] >> 4;
+ uint8 r = src_argb4444[1] & 0x0f;
+ uint8 a = src_argb4444[1] >> 4;
+ dst_argb[0] = (b << 4) | b;
+ dst_argb[1] = (g << 4) | g;
+ dst_argb[2] = (r << 4) | r;
+ dst_argb[3] = (a << 4) | a;
+ dst_argb += 4;
+ src_argb4444 += 2;
+ }
+}
+
+void ARGBToRGB24Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_argb[0];
+ uint8 g = src_argb[1];
+ uint8 r = src_argb[2];
+ dst_rgb[0] = b;
+ dst_rgb[1] = g;
+ dst_rgb[2] = r;
+ dst_rgb += 3;
+ src_argb += 4;
+ }
+}
+
+void ARGBToRAWRow_C(const uint8* src_argb, uint8* dst_rgb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_argb[0];
+ uint8 g = src_argb[1];
+ uint8 r = src_argb[2];
+ dst_rgb[0] = r;
+ dst_rgb[1] = g;
+ dst_rgb[2] = b;
+ dst_rgb += 3;
+ src_argb += 4;
+ }
+}
+
+void ARGBToRGB565Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 b0 = src_argb[0] >> 3;
+ uint8 g0 = src_argb[1] >> 2;
+ uint8 r0 = src_argb[2] >> 3;
+ uint8 b1 = src_argb[4] >> 3;
+ uint8 g1 = src_argb[5] >> 2;
+ uint8 r1 = src_argb[6] >> 3;
+ WRITEWORD(dst_rgb, b0 | (g0 << 5) | (r0 << 11) |
+ (b1 << 16) | (g1 << 21) | (r1 << 27));
+ dst_rgb += 4;
+ src_argb += 8;
+ }
+ if (width & 1) {
+ uint8 b0 = src_argb[0] >> 3;
+ uint8 g0 = src_argb[1] >> 2;
+ uint8 r0 = src_argb[2] >> 3;
+ *(uint16*)(dst_rgb) = b0 | (g0 << 5) | (r0 << 11);
+ }
+}
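+// Each RGB565 pixel packs B into bits [4:0], G into bits [10:5] and R into
+// bits [15:11]; the loop above writes two such pixels per 32-bit word.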
+
+// dither4 is a row of 4 values from 4x4 dither matrix.
+// The 4x4 matrix contains values to increase RGB. When converting to
+// fewer bits (565) this provides an ordered dither.
+// The order in the 4x4 matrix: the first byte is the upper left.
+// The 4 values are passed as an int, then referenced as an array, so
+// endian will not affect the order of the original matrix. But dither4
+// will contain the first pixel in the lower byte for little endian,
+// or in the upper byte for big endian.
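+// Illustrative example: on a little-endian machine, dither4 = 0x03020100
+// adds offsets 0, 1, 2, 3 to pixels with x & 3 == 0..3 before truncation.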
+void ARGBToRGB565DitherRow_C(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ int dither0 = ((const unsigned char*)(&dither4))[x & 3];
+ int dither1 = ((const unsigned char*)(&dither4))[(x + 1) & 3];
+ uint8 b0 = clamp255(src_argb[0] + dither0) >> 3;
+ uint8 g0 = clamp255(src_argb[1] + dither0) >> 2;
+ uint8 r0 = clamp255(src_argb[2] + dither0) >> 3;
+ uint8 b1 = clamp255(src_argb[4] + dither1) >> 3;
+ uint8 g1 = clamp255(src_argb[5] + dither1) >> 2;
+ uint8 r1 = clamp255(src_argb[6] + dither1) >> 3;
+ WRITEWORD(dst_rgb, b0 | (g0 << 5) | (r0 << 11) |
+ (b1 << 16) | (g1 << 21) | (r1 << 27));
+ dst_rgb += 4;
+ src_argb += 8;
+ }
+ if (width & 1) {
+ int dither0 = ((const unsigned char*)(&dither4))[(width - 1) & 3];
+ uint8 b0 = clamp255(src_argb[0] + dither0) >> 3;
+ uint8 g0 = clamp255(src_argb[1] + dither0) >> 2;
+ uint8 r0 = clamp255(src_argb[2] + dither0) >> 3;
+ *(uint16*)(dst_rgb) = b0 | (g0 << 5) | (r0 << 11);
+ }
+}
+
+void ARGBToARGB1555Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 b0 = src_argb[0] >> 3;
+ uint8 g0 = src_argb[1] >> 3;
+ uint8 r0 = src_argb[2] >> 3;
+ uint8 a0 = src_argb[3] >> 7;
+ uint8 b1 = src_argb[4] >> 3;
+ uint8 g1 = src_argb[5] >> 3;
+ uint8 r1 = src_argb[6] >> 3;
+ uint8 a1 = src_argb[7] >> 7;
+ *(uint32*)(dst_rgb) =
+ b0 | (g0 << 5) | (r0 << 10) | (a0 << 15) |
+ (b1 << 16) | (g1 << 21) | (r1 << 26) | (a1 << 31);
+ dst_rgb += 4;
+ src_argb += 8;
+ }
+ if (width & 1) {
+ uint8 b0 = src_argb[0] >> 3;
+ uint8 g0 = src_argb[1] >> 3;
+ uint8 r0 = src_argb[2] >> 3;
+ uint8 a0 = src_argb[3] >> 7;
+ *(uint16*)(dst_rgb) =
+ b0 | (g0 << 5) | (r0 << 10) | (a0 << 15);
+ }
+}
+
+void ARGBToARGB4444Row_C(const uint8* src_argb, uint8* dst_rgb, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 b0 = src_argb[0] >> 4;
+ uint8 g0 = src_argb[1] >> 4;
+ uint8 r0 = src_argb[2] >> 4;
+ uint8 a0 = src_argb[3] >> 4;
+ uint8 b1 = src_argb[4] >> 4;
+ uint8 g1 = src_argb[5] >> 4;
+ uint8 r1 = src_argb[6] >> 4;
+ uint8 a1 = src_argb[7] >> 4;
+ *(uint32*)(dst_rgb) =
+ b0 | (g0 << 4) | (r0 << 8) | (a0 << 12) |
+ (b1 << 16) | (g1 << 20) | (r1 << 24) | (a1 << 28);
+ dst_rgb += 4;
+ src_argb += 8;
+ }
+ if (width & 1) {
+ uint8 b0 = src_argb[0] >> 4;
+ uint8 g0 = src_argb[1] >> 4;
+ uint8 r0 = src_argb[2] >> 4;
+ uint8 a0 = src_argb[3] >> 4;
+ *(uint16*)(dst_rgb) =
+ b0 | (g0 << 4) | (r0 << 8) | (a0 << 12);
+ }
+}
+
+static __inline int RGBToY(uint8 r, uint8 g, uint8 b) {
+ return (66 * r + 129 * g + 25 * b + 0x1080) >> 8;
+}
+
+static __inline int RGBToU(uint8 r, uint8 g, uint8 b) {
+ return (112 * b - 74 * g - 38 * r + 0x8080) >> 8;
+}
+static __inline int RGBToV(uint8 r, uint8 g, uint8 b) {
+ return (112 * r - 94 * g - 18 * b + 0x8080) >> 8;
+}
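+// Sanity check of the studio-swing coefficients: RGBToY(255, 255, 255) =
+// (220 * 255 + 0x1080) >> 8 = 235 and RGBToY(0, 0, 0) = 16, the BT.601
+// video range; RGBToU and RGBToV map neutral grey (128, 128, 128) to 128.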
+
+#define MAKEROWY(NAME, R, G, B, BPP) \
+void NAME ## ToYRow_C(const uint8* src_argb0, uint8* dst_y, int width) { \
+ int x; \
+ for (x = 0; x < width; ++x) { \
+ dst_y[0] = RGBToY(src_argb0[R], src_argb0[G], src_argb0[B]); \
+ src_argb0 += BPP; \
+ dst_y += 1; \
+ } \
+} \
+void NAME ## ToUVRow_C(const uint8* src_rgb0, int src_stride_rgb, \
+ uint8* dst_u, uint8* dst_v, int width) { \
+ const uint8* src_rgb1 = src_rgb0 + src_stride_rgb; \
+ int x; \
+ for (x = 0; x < width - 1; x += 2) { \
+ uint8 ab = (src_rgb0[B] + src_rgb0[B + BPP] + \
+ src_rgb1[B] + src_rgb1[B + BPP]) >> 2; \
+ uint8 ag = (src_rgb0[G] + src_rgb0[G + BPP] + \
+ src_rgb1[G] + src_rgb1[G + BPP]) >> 2; \
+ uint8 ar = (src_rgb0[R] + src_rgb0[R + BPP] + \
+ src_rgb1[R] + src_rgb1[R + BPP]) >> 2; \
+ dst_u[0] = RGBToU(ar, ag, ab); \
+ dst_v[0] = RGBToV(ar, ag, ab); \
+ src_rgb0 += BPP * 2; \
+ src_rgb1 += BPP * 2; \
+ dst_u += 1; \
+ dst_v += 1; \
+ } \
+ if (width & 1) { \
+ uint8 ab = (src_rgb0[B] + src_rgb1[B]) >> 1; \
+ uint8 ag = (src_rgb0[G] + src_rgb1[G]) >> 1; \
+ uint8 ar = (src_rgb0[R] + src_rgb1[R]) >> 1; \
+ dst_u[0] = RGBToU(ar, ag, ab); \
+ dst_v[0] = RGBToV(ar, ag, ab); \
+ } \
+}
+
+MAKEROWY(ARGB, 2, 1, 0, 4)
+MAKEROWY(BGRA, 1, 2, 3, 4)
+MAKEROWY(ABGR, 0, 1, 2, 4)
+MAKEROWY(RGBA, 3, 2, 1, 4)
+MAKEROWY(RGB24, 2, 1, 0, 3)
+MAKEROWY(RAW, 0, 1, 2, 3)
+#undef MAKEROWY
+
+// JPeg uses a variation on BT.601-1 full range
+// y = 0.29900 * r + 0.58700 * g + 0.11400 * b
+// u = -0.16874 * r - 0.33126 * g + 0.50000 * b + center
+// v = 0.50000 * r - 0.41869 * g - 0.08131 * b + center
+// BT.601 Mpeg range uses:
+// b 0.1016 * 255 = 25.908 = 25
+// g 0.5078 * 255 = 129.489 = 129
+// r 0.2578 * 255 = 65.739 = 66
+// JPeg 8 bit Y (not used):
+// b 0.11400 * 256 = 29.184 = 29
+// g 0.58700 * 256 = 150.272 = 150
+// r 0.29900 * 256 = 76.544 = 77
+// JPeg 7 bit Y:
+// b 0.11400 * 128 = 14.592 = 15
+// g 0.58700 * 128 = 75.136 = 75
+// r 0.29900 * 128 = 38.272 = 38
+// JPeg 8 bit U:
+// b 0.50000 * 255 = 127.5 = 127
+// g -0.33126 * 255 = -84.4713 = -84
+// r -0.16874 * 255 = -43.0287 = -43
+// JPeg 8 bit V:
+// b -0.08131 * 255 = -20.73405 = -20
+// g -0.41869 * 255 = -106.76595 = -107
+// r 0.50000 * 255 = 127.5 = 127
+
+static __inline int RGBToYJ(uint8 r, uint8 g, uint8 b) {
+ return (38 * r + 75 * g + 15 * b + 64) >> 7;
+}
+
+static __inline int RGBToUJ(uint8 r, uint8 g, uint8 b) {
+ return (127 * b - 84 * g - 43 * r + 0x8080) >> 8;
+}
+static __inline int RGBToVJ(uint8 r, uint8 g, uint8 b) {
+ return (127 * r - 107 * g - 20 * b + 0x8080) >> 8;
+}
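+// Unlike the studio-swing helpers above, these span the full 8-bit range:
+// RGBToYJ(255, 255, 255) = (128 * 255 + 64) >> 7 = 255, RGBToYJ(0, 0, 0) = 0.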
+
+#define AVGB(a, b) (((a) + (b) + 1) >> 1)
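+// AVGB rounds up on ties: AVGB(3, 4) = (3 + 4 + 1) >> 1 = 4.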
+
+#define MAKEROWYJ(NAME, R, G, B, BPP) \
+void NAME ## ToYJRow_C(const uint8* src_argb0, uint8* dst_y, int width) { \
+ int x; \
+ for (x = 0; x < width; ++x) { \
+ dst_y[0] = RGBToYJ(src_argb0[R], src_argb0[G], src_argb0[B]); \
+ src_argb0 += BPP; \
+ dst_y += 1; \
+ } \
+} \
+void NAME ## ToUVJRow_C(const uint8* src_rgb0, int src_stride_rgb, \
+ uint8* dst_u, uint8* dst_v, int width) { \
+ const uint8* src_rgb1 = src_rgb0 + src_stride_rgb; \
+ int x; \
+ for (x = 0; x < width - 1; x += 2) { \
+ uint8 ab = AVGB(AVGB(src_rgb0[B], src_rgb1[B]), \
+ AVGB(src_rgb0[B + BPP], src_rgb1[B + BPP])); \
+ uint8 ag = AVGB(AVGB(src_rgb0[G], src_rgb1[G]), \
+ AVGB(src_rgb0[G + BPP], src_rgb1[G + BPP])); \
+ uint8 ar = AVGB(AVGB(src_rgb0[R], src_rgb1[R]), \
+ AVGB(src_rgb0[R + BPP], src_rgb1[R + BPP])); \
+ dst_u[0] = RGBToUJ(ar, ag, ab); \
+ dst_v[0] = RGBToVJ(ar, ag, ab); \
+ src_rgb0 += BPP * 2; \
+ src_rgb1 += BPP * 2; \
+ dst_u += 1; \
+ dst_v += 1; \
+ } \
+ if (width & 1) { \
+ uint8 ab = AVGB(src_rgb0[B], src_rgb1[B]); \
+ uint8 ag = AVGB(src_rgb0[G], src_rgb1[G]); \
+ uint8 ar = AVGB(src_rgb0[R], src_rgb1[R]); \
+ dst_u[0] = RGBToUJ(ar, ag, ab); \
+ dst_v[0] = RGBToVJ(ar, ag, ab); \
+ } \
+}
+
+MAKEROWYJ(ARGB, 2, 1, 0, 4)
+#undef MAKEROWYJ
+
+void ARGBToUVJ422Row_C(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 ab = (src_argb[0] + src_argb[4]) >> 1;
+ uint8 ag = (src_argb[1] + src_argb[5]) >> 1;
+ uint8 ar = (src_argb[2] + src_argb[6]) >> 1;
+ dst_u[0] = RGBToUJ(ar, ag, ab);
+ dst_v[0] = RGBToVJ(ar, ag, ab);
+ src_argb += 8;
+ dst_u += 1;
+ dst_v += 1;
+ }
+ if (width & 1) {
+ uint8 ab = src_argb[0];
+ uint8 ag = src_argb[1];
+ uint8 ar = src_argb[2];
+ dst_u[0] = RGBToUJ(ar, ag, ab);
+ dst_v[0] = RGBToVJ(ar, ag, ab);
+ }
+}
+
+void RGB565ToYRow_C(const uint8* src_rgb565, uint8* dst_y, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_rgb565[0] & 0x1f;
+ uint8 g = (src_rgb565[0] >> 5) | ((src_rgb565[1] & 0x07) << 3);
+ uint8 r = src_rgb565[1] >> 3;
+ b = (b << 3) | (b >> 2);
+ g = (g << 2) | (g >> 4);
+ r = (r << 3) | (r >> 2);
+ dst_y[0] = RGBToY(r, g, b);
+ src_rgb565 += 2;
+ dst_y += 1;
+ }
+}
+
+void ARGB1555ToYRow_C(const uint8* src_argb1555, uint8* dst_y, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_argb1555[0] & 0x1f;
+ uint8 g = (src_argb1555[0] >> 5) | ((src_argb1555[1] & 0x03) << 3);
+ uint8 r = (src_argb1555[1] & 0x7c) >> 2;
+ b = (b << 3) | (b >> 2);
+ g = (g << 3) | (g >> 2);
+ r = (r << 3) | (r >> 2);
+ dst_y[0] = RGBToY(r, g, b);
+ src_argb1555 += 2;
+ dst_y += 1;
+ }
+}
+
+void ARGB4444ToYRow_C(const uint8* src_argb4444, uint8* dst_y, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 b = src_argb4444[0] & 0x0f;
+ uint8 g = src_argb4444[0] >> 4;
+ uint8 r = src_argb4444[1] & 0x0f;
+ b = (b << 4) | b;
+ g = (g << 4) | g;
+ r = (r << 4) | r;
+ dst_y[0] = RGBToY(r, g, b);
+ src_argb4444 += 2;
+ dst_y += 1;
+ }
+}
+
+void RGB565ToUVRow_C(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_u, uint8* dst_v, int width) {
+ const uint8* next_rgb565 = src_rgb565 + src_stride_rgb565;
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 b0 = src_rgb565[0] & 0x1f;
+ uint8 g0 = (src_rgb565[0] >> 5) | ((src_rgb565[1] & 0x07) << 3);
+ uint8 r0 = src_rgb565[1] >> 3;
+ uint8 b1 = src_rgb565[2] & 0x1f;
+ uint8 g1 = (src_rgb565[2] >> 5) | ((src_rgb565[3] & 0x07) << 3);
+ uint8 r1 = src_rgb565[3] >> 3;
+ uint8 b2 = next_rgb565[0] & 0x1f;
+ uint8 g2 = (next_rgb565[0] >> 5) | ((next_rgb565[1] & 0x07) << 3);
+ uint8 r2 = next_rgb565[1] >> 3;
+ uint8 b3 = next_rgb565[2] & 0x1f;
+ uint8 g3 = (next_rgb565[2] >> 5) | ((next_rgb565[3] & 0x07) << 3);
+ uint8 r3 = next_rgb565[3] >> 3;
+ uint8 b = (b0 + b1 + b2 + b3); // 565 * 4 = 787.
+ uint8 g = (g0 + g1 + g2 + g3);
+ uint8 r = (r0 + r1 + r2 + r3);
+ b = (b << 1) | (b >> 6); // 787 -> 888.
+ r = (r << 1) | (r >> 6);
+ dst_u[0] = RGBToU(r, g, b);
+ dst_v[0] = RGBToV(r, g, b);
+ src_rgb565 += 4;
+ next_rgb565 += 4;
+ dst_u += 1;
+ dst_v += 1;
+ }
+ if (width & 1) {
+ uint8 b0 = src_rgb565[0] & 0x1f;
+ uint8 g0 = (src_rgb565[0] >> 5) | ((src_rgb565[1] & 0x07) << 3);
+ uint8 r0 = src_rgb565[1] >> 3;
+ uint8 b2 = next_rgb565[0] & 0x1f;
+ uint8 g2 = (next_rgb565[0] >> 5) | ((next_rgb565[1] & 0x07) << 3);
+ uint8 r2 = next_rgb565[1] >> 3;
+ uint8 b = (b0 + b2); // 565 * 2 = 676.
+ uint8 g = (g0 + g2);
+ uint8 r = (r0 + r2);
+ b = (b << 2) | (b >> 4); // 676 -> 888
+ g = (g << 1) | (g >> 6);
+ r = (r << 2) | (r >> 4);
+ dst_u[0] = RGBToU(r, g, b);
+ dst_v[0] = RGBToV(r, g, b);
+ }
+}
+
+void ARGB1555ToUVRow_C(const uint8* src_argb1555, int src_stride_argb1555,
+ uint8* dst_u, uint8* dst_v, int width) {
+ const uint8* next_argb1555 = src_argb1555 + src_stride_argb1555;
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 b0 = src_argb1555[0] & 0x1f;
+ uint8 g0 = (src_argb1555[0] >> 5) | ((src_argb1555[1] & 0x03) << 3);
+ uint8 r0 = (src_argb1555[1] & 0x7c) >> 2;
+ uint8 b1 = src_argb1555[2] & 0x1f;
+ uint8 g1 = (src_argb1555[2] >> 5) | ((src_argb1555[3] & 0x03) << 3);
+ uint8 r1 = (src_argb1555[3] & 0x7c) >> 2;
+ uint8 b2 = next_argb1555[0] & 0x1f;
+ uint8 g2 = (next_argb1555[0] >> 5) | ((next_argb1555[1] & 0x03) << 3);
+ uint8 r2 = (next_argb1555[1] & 0x7c) >> 2;
+ uint8 b3 = next_argb1555[2] & 0x1f;
+ uint8 g3 = (next_argb1555[2] >> 5) | ((next_argb1555[3] & 0x03) << 3);
+ uint8 r3 = (next_argb1555[3] & 0x7c) >> 2;
+ uint8 b = (b0 + b1 + b2 + b3); // 555 * 4 = 777.
+ uint8 g = (g0 + g1 + g2 + g3);
+ uint8 r = (r0 + r1 + r2 + r3);
+ b = (b << 1) | (b >> 6); // 777 -> 888.
+ g = (g << 1) | (g >> 6);
+ r = (r << 1) | (r >> 6);
+ dst_u[0] = RGBToU(r, g, b);
+ dst_v[0] = RGBToV(r, g, b);
+ src_argb1555 += 4;
+ next_argb1555 += 4;
+ dst_u += 1;
+ dst_v += 1;
+ }
+ if (width & 1) {
+ uint8 b0 = src_argb1555[0] & 0x1f;
+ uint8 g0 = (src_argb1555[0] >> 5) | ((src_argb1555[1] & 0x03) << 3);
+ uint8 r0 = (src_argb1555[1] & 0x7c) >> 2;
+ uint8 b2 = next_argb1555[0] & 0x1f;
+ uint8 g2 = (next_argb1555[0] >> 5) | ((next_argb1555[1] & 0x03) << 3);
+    uint8 r2 = (next_argb1555[1] & 0x7c) >> 2;  // mask alpha, matching the even-width path.
+ uint8 b = (b0 + b2); // 555 * 2 = 666.
+ uint8 g = (g0 + g2);
+ uint8 r = (r0 + r2);
+ b = (b << 2) | (b >> 4); // 666 -> 888.
+ g = (g << 2) | (g >> 4);
+ r = (r << 2) | (r >> 4);
+ dst_u[0] = RGBToU(r, g, b);
+ dst_v[0] = RGBToV(r, g, b);
+ }
+}
+
+void ARGB4444ToUVRow_C(const uint8* src_argb4444, int src_stride_argb4444,
+ uint8* dst_u, uint8* dst_v, int width) {
+ const uint8* next_argb4444 = src_argb4444 + src_stride_argb4444;
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 b0 = src_argb4444[0] & 0x0f;
+ uint8 g0 = src_argb4444[0] >> 4;
+ uint8 r0 = src_argb4444[1] & 0x0f;
+ uint8 b1 = src_argb4444[2] & 0x0f;
+ uint8 g1 = src_argb4444[2] >> 4;
+ uint8 r1 = src_argb4444[3] & 0x0f;
+ uint8 b2 = next_argb4444[0] & 0x0f;
+ uint8 g2 = next_argb4444[0] >> 4;
+ uint8 r2 = next_argb4444[1] & 0x0f;
+ uint8 b3 = next_argb4444[2] & 0x0f;
+ uint8 g3 = next_argb4444[2] >> 4;
+ uint8 r3 = next_argb4444[3] & 0x0f;
+ uint8 b = (b0 + b1 + b2 + b3); // 444 * 4 = 666.
+ uint8 g = (g0 + g1 + g2 + g3);
+ uint8 r = (r0 + r1 + r2 + r3);
+ b = (b << 2) | (b >> 4); // 666 -> 888.
+ g = (g << 2) | (g >> 4);
+ r = (r << 2) | (r >> 4);
+ dst_u[0] = RGBToU(r, g, b);
+ dst_v[0] = RGBToV(r, g, b);
+ src_argb4444 += 4;
+ next_argb4444 += 4;
+ dst_u += 1;
+ dst_v += 1;
+ }
+ if (width & 1) {
+ uint8 b0 = src_argb4444[0] & 0x0f;
+ uint8 g0 = src_argb4444[0] >> 4;
+ uint8 r0 = src_argb4444[1] & 0x0f;
+ uint8 b2 = next_argb4444[0] & 0x0f;
+ uint8 g2 = next_argb4444[0] >> 4;
+ uint8 r2 = next_argb4444[1] & 0x0f;
+ uint8 b = (b0 + b2); // 444 * 2 = 555.
+ uint8 g = (g0 + g2);
+ uint8 r = (r0 + r2);
+ b = (b << 3) | (b >> 2); // 555 -> 888.
+ g = (g << 3) | (g >> 2);
+ r = (r << 3) | (r >> 2);
+ dst_u[0] = RGBToU(r, g, b);
+ dst_v[0] = RGBToV(r, g, b);
+ }
+}
+
+void ARGBToUV444Row_C(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 ab = src_argb[0];
+ uint8 ag = src_argb[1];
+ uint8 ar = src_argb[2];
+ dst_u[0] = RGBToU(ar, ag, ab);
+ dst_v[0] = RGBToV(ar, ag, ab);
+ src_argb += 4;
+ dst_u += 1;
+ dst_v += 1;
+ }
+}
+
+void ARGBToUV422Row_C(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 ab = (src_argb[0] + src_argb[4]) >> 1;
+ uint8 ag = (src_argb[1] + src_argb[5]) >> 1;
+ uint8 ar = (src_argb[2] + src_argb[6]) >> 1;
+ dst_u[0] = RGBToU(ar, ag, ab);
+ dst_v[0] = RGBToV(ar, ag, ab);
+ src_argb += 8;
+ dst_u += 1;
+ dst_v += 1;
+ }
+ if (width & 1) {
+ uint8 ab = src_argb[0];
+ uint8 ag = src_argb[1];
+ uint8 ar = src_argb[2];
+ dst_u[0] = RGBToU(ar, ag, ab);
+ dst_v[0] = RGBToV(ar, ag, ab);
+ }
+}
+
+void ARGBToUV411Row_C(const uint8* src_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ int x;
+ for (x = 0; x < width - 3; x += 4) {
+ uint8 ab = (src_argb[0] + src_argb[4] + src_argb[8] + src_argb[12]) >> 2;
+ uint8 ag = (src_argb[1] + src_argb[5] + src_argb[9] + src_argb[13]) >> 2;
+ uint8 ar = (src_argb[2] + src_argb[6] + src_argb[10] + src_argb[14]) >> 2;
+ dst_u[0] = RGBToU(ar, ag, ab);
+ dst_v[0] = RGBToV(ar, ag, ab);
+ src_argb += 16;
+ dst_u += 1;
+ dst_v += 1;
+ }
+ if ((width & 3) == 3) {
+ uint8 ab = (src_argb[0] + src_argb[4] + src_argb[8]) / 3;
+ uint8 ag = (src_argb[1] + src_argb[5] + src_argb[9]) / 3;
+ uint8 ar = (src_argb[2] + src_argb[6] + src_argb[10]) / 3;
+ dst_u[0] = RGBToU(ar, ag, ab);
+ dst_v[0] = RGBToV(ar, ag, ab);
+ } else if ((width & 3) == 2) {
+ uint8 ab = (src_argb[0] + src_argb[4]) >> 1;
+ uint8 ag = (src_argb[1] + src_argb[5]) >> 1;
+ uint8 ar = (src_argb[2] + src_argb[6]) >> 1;
+ dst_u[0] = RGBToU(ar, ag, ab);
+ dst_v[0] = RGBToV(ar, ag, ab);
+ } else if ((width & 3) == 1) {
+ uint8 ab = src_argb[0];
+ uint8 ag = src_argb[1];
+ uint8 ar = src_argb[2];
+ dst_u[0] = RGBToU(ar, ag, ab);
+ dst_v[0] = RGBToV(ar, ag, ab);
+ }
+}
+
+void ARGBGrayRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 y = RGBToYJ(src_argb[2], src_argb[1], src_argb[0]);
+ dst_argb[2] = dst_argb[1] = dst_argb[0] = y;
+ dst_argb[3] = src_argb[3];
+ dst_argb += 4;
+ src_argb += 4;
+ }
+}
+
+// Convert a row of image to Sepia tone.
+void ARGBSepiaRow_C(uint8* dst_argb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ int b = dst_argb[0];
+ int g = dst_argb[1];
+ int r = dst_argb[2];
+ int sb = (b * 17 + g * 68 + r * 35) >> 7;
+ int sg = (b * 22 + g * 88 + r * 45) >> 7;
+ int sr = (b * 24 + g * 98 + r * 50) >> 7;
+ // b does not overflow (its coefficients sum to 120, so sb <= 239); a is preserved from the original.
+ dst_argb[0] = sb;
+ dst_argb[1] = clamp255(sg);
+ dst_argb[2] = clamp255(sr);
+ dst_argb += 4;
+ }
+}
+
+// Apply a color matrix to a row of the image. Coefficients are signed 8-bit
+// with 6 fractional bits (hence the >> 6), so 64 represents 1.0.
+// TODO(fbarchard): Consider adding rounding (+32).
+void ARGBColorMatrixRow_C(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ int b = src_argb[0];
+ int g = src_argb[1];
+ int r = src_argb[2];
+ int a = src_argb[3];
+ int sb = (b * matrix_argb[0] + g * matrix_argb[1] +
+ r * matrix_argb[2] + a * matrix_argb[3]) >> 6;
+ int sg = (b * matrix_argb[4] + g * matrix_argb[5] +
+ r * matrix_argb[6] + a * matrix_argb[7]) >> 6;
+ int sr = (b * matrix_argb[8] + g * matrix_argb[9] +
+ r * matrix_argb[10] + a * matrix_argb[11]) >> 6;
+ int sa = (b * matrix_argb[12] + g * matrix_argb[13] +
+ r * matrix_argb[14] + a * matrix_argb[15]) >> 6;
+ dst_argb[0] = Clamp(sb);
+ dst_argb[1] = Clamp(sg);
+ dst_argb[2] = Clamp(sr);
+ dst_argb[3] = Clamp(sa);
+ src_argb += 4;
+ dst_argb += 4;
+ }
+}
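+
+// Illustrative sketch (not upstream code): since 64 is 1.0 in the 6-bit
+// fixed-point coefficient format, the following hypothetical identity
+// matrix leaves every pixel unchanged.
+#ifdef LIBYUV_EXAMPLES  // Hypothetical guard; illustration only.
+static void ColorMatrixIdentityExample(const uint8* src, uint8* dst, int w) {
+ static const int8 kIdentityMatrix[16] = {
+ 64, 0, 0, 0,  // B' = 1.0 * B
+ 0, 64, 0, 0,  // G' = 1.0 * G
+ 0, 0, 64, 0,  // R' = 1.0 * R
+ 0, 0, 0, 64,  // A' = 1.0 * A
+ };
+ ARGBColorMatrixRow_C(src, dst, kIdentityMatrix, w);
+}
+#endif  // LIBYUV_EXAMPLES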
+
+// Apply a color table to a row of the image.
+void ARGBColorTableRow_C(uint8* dst_argb, const uint8* table_argb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ int b = dst_argb[0];
+ int g = dst_argb[1];
+ int r = dst_argb[2];
+ int a = dst_argb[3];
+ dst_argb[0] = table_argb[b * 4 + 0];
+ dst_argb[1] = table_argb[g * 4 + 1];
+ dst_argb[2] = table_argb[r * 4 + 2];
+ dst_argb[3] = table_argb[a * 4 + 3];
+ dst_argb += 4;
+ }
+}
+
+// Apply a color table to the RGB channels of a row; alpha is left unchanged.
+void RGBColorTableRow_C(uint8* dst_argb, const uint8* table_argb, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ int b = dst_argb[0];
+ int g = dst_argb[1];
+ int r = dst_argb[2];
+ dst_argb[0] = table_argb[b * 4 + 0];
+ dst_argb[1] = table_argb[g * 4 + 1];
+ dst_argb[2] = table_argb[r * 4 + 2];
+ dst_argb += 4;
+ }
+}
+
+void ARGBQuantizeRow_C(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ int b = dst_argb[0];
+ int g = dst_argb[1];
+ int r = dst_argb[2];
+ dst_argb[0] = (b * scale >> 16) * interval_size + interval_offset;
+ dst_argb[1] = (g * scale >> 16) * interval_size + interval_offset;
+ dst_argb[2] = (r * scale >> 16) * interval_size + interval_offset;
+ dst_argb += 4;
+ }
+}
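+
+// Illustrative sketch (not upstream code): "scale" acts as a 16.16
+// reciprocal of interval_size, so (b * scale >> 16) picks a bucket index and
+// multiplying back by interval_size snaps the channel to its bucket.
+#ifdef LIBYUV_EXAMPLES  // Hypothetical guard; illustration only.
+static void PosterizeTo4LevelsExample(uint8* row, int width) {
+ const int interval_size = 64;  // 256 / 4 levels.
+ const int scale = 65536 / interval_size;  // 1024; b * 1024 >> 16 == b / 64.
+ const int interval_offset = 32;  // Center each bucket.
+ // e.g. 255 -> (255 * 1024 >> 16) * 64 + 32 = 3 * 64 + 32 = 224.
+ ARGBQuantizeRow_C(row, scale, interval_size, interval_offset, width);
+}
+#endif  // LIBYUV_EXAMPLES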
+
+#define REPEAT8(v) ((v) | ((v) << 8))
+#define SHADE(f, v) (((v) * (f)) >> 24)
+
+void ARGBShadeRow_C(const uint8* src_argb, uint8* dst_argb, int width,
+ uint32 value) {
+ const uint32 b_scale = REPEAT8(value & 0xff);
+ const uint32 g_scale = REPEAT8((value >> 8) & 0xff);
+ const uint32 r_scale = REPEAT8((value >> 16) & 0xff);
+ const uint32 a_scale = REPEAT8(value >> 24);
+
+ int i;
+ for (i = 0; i < width; ++i) {
+ const uint32 b = REPEAT8(src_argb[0]);
+ const uint32 g = REPEAT8(src_argb[1]);
+ const uint32 r = REPEAT8(src_argb[2]);
+ const uint32 a = REPEAT8(src_argb[3]);
+ dst_argb[0] = SHADE(b, b_scale);
+ dst_argb[1] = SHADE(g, g_scale);
+ dst_argb[2] = SHADE(r, r_scale);
+ dst_argb[3] = SHADE(a, a_scale);
+ src_argb += 4;
+ dst_argb += 4;
+ }
+}
+#undef REPEAT8
+#undef SHADE
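+
+// Worked example: REPEAT8(v) == v * 257, which is very close to
+// v * 65536 / 255, so (REPEAT8(v) * REPEAT8(f)) >> 24 approximates
+// v * f / 255 without a divide; e.g. v = f = 255 gives
+// 65535 * 65535 >> 24 = 255, so shading by full intensity is the identity.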
+
+#define REPEAT8(v) ((v) | ((v) << 8))
+#define SHADE(f, v) (((v) * (f)) >> 16)
+
+void ARGBMultiplyRow_C(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ const uint32 b = REPEAT8(src_argb0[0]);
+ const uint32 g = REPEAT8(src_argb0[1]);
+ const uint32 r = REPEAT8(src_argb0[2]);
+ const uint32 a = REPEAT8(src_argb0[3]);
+ const uint32 b_scale = src_argb1[0];
+ const uint32 g_scale = src_argb1[1];
+ const uint32 r_scale = src_argb1[2];
+ const uint32 a_scale = src_argb1[3];
+ dst_argb[0] = SHADE(b, b_scale);
+ dst_argb[1] = SHADE(g, g_scale);
+ dst_argb[2] = SHADE(r, r_scale);
+ dst_argb[3] = SHADE(a, a_scale);
+ src_argb0 += 4;
+ src_argb1 += 4;
+ dst_argb += 4;
+ }
+}
+#undef REPEAT8
+#undef SHADE
+
+#define SHADE(f, v) clamp255((v) + (f))
+
+void ARGBAddRow_C(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ const int b = src_argb0[0];
+ const int g = src_argb0[1];
+ const int r = src_argb0[2];
+ const int a = src_argb0[3];
+ const int b_add = src_argb1[0];
+ const int g_add = src_argb1[1];
+ const int r_add = src_argb1[2];
+ const int a_add = src_argb1[3];
+ dst_argb[0] = SHADE(b, b_add);
+ dst_argb[1] = SHADE(g, g_add);
+ dst_argb[2] = SHADE(r, r_add);
+ dst_argb[3] = SHADE(a, a_add);
+ src_argb0 += 4;
+ src_argb1 += 4;
+ dst_argb += 4;
+ }
+}
+#undef SHADE
+
+#define SHADE(f, v) clamp0((f) - (v))
+
+void ARGBSubtractRow_C(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ const int b = src_argb0[0];
+ const int g = src_argb0[1];
+ const int r = src_argb0[2];
+ const int a = src_argb0[3];
+ const int b_sub = src_argb1[0];
+ const int g_sub = src_argb1[1];
+ const int r_sub = src_argb1[2];
+ const int a_sub = src_argb1[3];
+ dst_argb[0] = SHADE(b, b_sub);
+ dst_argb[1] = SHADE(g, g_sub);
+ dst_argb[2] = SHADE(r, r_sub);
+ dst_argb[3] = SHADE(a, a_sub);
+ src_argb0 += 4;
+ src_argb1 += 4;
+ dst_argb += 4;
+ }
+}
+#undef SHADE
+
+// Sobel functions that mimic the SSSE3 versions.
+void SobelXRow_C(const uint8* src_y0, const uint8* src_y1, const uint8* src_y2,
+ uint8* dst_sobelx, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ int a = src_y0[i];
+ int b = src_y1[i];
+ int c = src_y2[i];
+ int a_sub = src_y0[i + 2];
+ int b_sub = src_y1[i + 2];
+ int c_sub = src_y2[i + 2];
+ int a_diff = a - a_sub;
+ int b_diff = b - b_sub;
+ int c_diff = c - c_sub;
+ int sobel = Abs(a_diff + b_diff * 2 + c_diff);
+ dst_sobelx[i] = (uint8)(clamp255(sobel));
+ }
+}
+
+void SobelYRow_C(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ int a = src_y0[i + 0];
+ int b = src_y0[i + 1];
+ int c = src_y0[i + 2];
+ int a_sub = src_y1[i + 0];
+ int b_sub = src_y1[i + 1];
+ int c_sub = src_y1[i + 2];
+ int a_diff = a - a_sub;
+ int b_diff = b - b_sub;
+ int c_diff = c - c_sub;
+ int sobel = Abs(a_diff + b_diff * 2 + c_diff);
+ dst_sobely[i] = (uint8)(clamp255(sobel));
+ }
+}
+
+void SobelRow_C(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ int r = src_sobelx[i];
+ int b = src_sobely[i];
+ int s = clamp255(r + b);
+ dst_argb[0] = (uint8)(s);
+ dst_argb[1] = (uint8)(s);
+ dst_argb[2] = (uint8)(s);
+ dst_argb[3] = (uint8)(255u);
+ dst_argb += 4;
+ }
+}
+
+void SobelToPlaneRow_C(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ int r = src_sobelx[i];
+ int b = src_sobely[i];
+ int s = clamp255(r + b);
+ dst_y[i] = (uint8)(s);
+ }
+}
+
+void SobelXYRow_C(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ int r = src_sobelx[i];
+ int b = src_sobely[i];
+ int g = clamp255(r + b);
+ dst_argb[0] = (uint8)(b);
+ dst_argb[1] = (uint8)(g);
+ dst_argb[2] = (uint8)(r);
+ dst_argb[3] = (uint8)(255u);
+ dst_argb += 4;
+ }
+}
+
+void J400ToARGBRow_C(const uint8* src_y, uint8* dst_argb, int width) {
+ // Replicate Y into R, G and B; alpha is set to opaque.
+ int x;
+ for (x = 0; x < width; ++x) {
+ uint8 y = src_y[0];
+ dst_argb[2] = dst_argb[1] = dst_argb[0] = y;
+ dst_argb[3] = 255u;
+ dst_argb += 4;
+ ++src_y;
+ }
+}
+
+// BT.601 YUV to RGB reference (the constants below store negated U/V terms):
+// R = (Y - 16) * 1.164 + V * 1.596
+// G = (Y - 16) * 1.164 - U * 0.391 - V * 0.813
+// B = (Y - 16) * 1.164 + U * 2.018
+
+// Y contribution to R,G,B. Scale and bias.
+// TODO(fbarchard): Consider moving constants into a common header.
+#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */
+#define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */
+
+// U and V contributions to R,G,B.
+#define UB -128 /* max(-128, round(-2.018 * 64)) */
+#define UG 25 /* round(0.391 * 64) */
+#define VG 52 /* round(0.813 * 64) */
+#define VR -102 /* round(-1.596 * 64) */
+
+// Bias values to subtract 16 from Y and 128 from U and V.
+#define BB (UB * 128 + YGB)
+#define BG (UG * 128 + VG * 128 + YGB)
+#define BR (VR * 128 + YGB)
+
+// C reference code that mimics the YUV assembly.
+static __inline void YuvPixel(uint8 y, uint8 u, uint8 v,
+ uint8* b, uint8* g, uint8* r) {
+ uint32 y1 = (uint32)(y * 0x0101 * YG) >> 16;
+ *b = Clamp((int32)(-(u * UB) + y1 + BB) >> 6);
+ *g = Clamp((int32)(-(v * VG + u * UG) + y1 + BG) >> 6);
+ *r = Clamp((int32)(-(v * VR) + y1 + BR) >> 6);
+}
+
+// C reference code that mimics the YUV assembly.
+static __inline void YPixel(uint8 y, uint8* b, uint8* g, uint8* r) {
+ uint32 y1 = (uint32)(y * 0x0101 * YG) >> 16;
+ *b = Clamp((int32)(y1 + YGB) >> 6);
+ *g = Clamp((int32)(y1 + YGB) >> 6);
+ *r = Clamp((int32)(y1 + YGB) >> 6);
+}
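+
+// Worked example: for (Y, U, V) = (16, 128, 128),
+// y1 = (16 * 0x0101 * 18997) >> 16 = 1191 and
+// *b = Clamp((16384 + 1191 - 17544) >> 6) = Clamp(31 >> 6) = 0;
+// for (235, 128, 128) the same arithmetic yields 255 on every channel,
+// so video-range black and white map to 0 and 255 as expected.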
+
+#undef YG
+#undef YGB
+#undef UB
+#undef UG
+#undef VG
+#undef VR
+#undef BB
+#undef BG
+#undef BR
+
+// JPEG (full range) YUV to RGB reference (constants store negated U/V terms):
+// * R = Y + V * 1.40200
+// * G = Y - U * 0.34414 - V * 0.71414
+// * B = Y + U * 1.77200
+
+// Y contribution to R,G,B. Scale and bias.
+// TODO(fbarchard): Consider moving constants into a common header.
+#define YGJ 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
+#define YGBJ 32 /* 64 / 2 */
+
+// U and V contributions to R,G,B.
+#define UBJ -113 /* round(-1.77200 * 64) */
+#define UGJ 22 /* round(0.34414 * 64) */
+#define VGJ 46 /* round(0.71414 * 64) */
+#define VRJ -90 /* round(-1.40200 * 64) */
+
+// Bias values to round Y and to subtract 128 from U and V (full-range Y has no 16 offset).
+#define BBJ (UBJ * 128 + YGBJ)
+#define BGJ (UGJ * 128 + VGJ * 128 + YGBJ)
+#define BRJ (VRJ * 128 + YGBJ)
+
+// C reference code that mimics the YUV assembly.
+static __inline void YuvJPixel(uint8 y, uint8 u, uint8 v,
+ uint8* b, uint8* g, uint8* r) {
+ uint32 y1 = (uint32)(y * 0x0101 * YGJ) >> 16;
+ *b = Clamp((int32)(-(u * UBJ) + y1 + BBJ) >> 6);
+ *g = Clamp((int32)(-(v * VGJ + u * UGJ) + y1 + BGJ) >> 6);
+ *r = Clamp((int32)(-(v * VRJ) + y1 + BRJ) >> 6);
+}
+
+#undef YGJ
+#undef YGBJ
+#undef UBJ
+#undef UGJ
+#undef VGJ
+#undef VRJ
+#undef BBJ
+#undef BGJ
+#undef BRJ
+
+#if !defined(LIBYUV_DISABLE_NEON) && \
+ (defined(__ARM_NEON__) || defined(__aarch64__) || defined(LIBYUV_NEON))
+// C code that mimics the NEON assembly, which subsamples pairs of U and V.
+// TODO(fbarchard): Remove subsampling from Neon.
+void I444ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint8 u = (src_u[0] + src_u[1] + 1) >> 1;
+ uint8 v = (src_v[0] + src_v[1] + 1) >> 1;
+ YuvPixel(src_y[0], u, v, rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YuvPixel(src_y[1], u, v, rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ src_y += 2;
+ src_u += 2;
+ src_v += 2;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ }
+}
+#else
+void I444ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width; ++x) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ src_y += 1;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 4; // Advance 1 pixel.
+ }
+}
+#endif
+
+// Also used for 420; each chroma row is shared by two luma rows.
+void I422ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YuvPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ }
+}
+
+void J422ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvJPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YuvJPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvJPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ }
+}
+
+void I422ToRGB24Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ YuvPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 3, rgb_buf + 4, rgb_buf + 5);
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 6; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ }
+}
+
+void I422ToRAWRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 2, rgb_buf + 1, rgb_buf + 0);
+ YuvPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 5, rgb_buf + 4, rgb_buf + 3);
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 6; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 2, rgb_buf + 1, rgb_buf + 0);
+ }
+}
+
+void I422ToARGB4444Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb4444,
+ int width) {
+ uint8 b0;
+ uint8 g0;
+ uint8 r0;
+ uint8 b1;
+ uint8 g1;
+ uint8 r1;
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0);
+ YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1);
+ b0 = b0 >> 4;
+ g0 = g0 >> 4;
+ r0 = r0 >> 4;
+ b1 = b1 >> 4;
+ g1 = g1 >> 4;
+ r1 = r1 >> 4;
+ *(uint32*)(dst_argb4444) = b0 | (g0 << 4) | (r0 << 8) |
+ (b1 << 16) | (g1 << 20) | (r1 << 24) | 0xf000f000;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ dst_argb4444 += 4; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0);
+ b0 = b0 >> 4;
+ g0 = g0 >> 4;
+ r0 = r0 >> 4;
+ *(uint16*)(dst_argb4444) = b0 | (g0 << 4) | (r0 << 8) |
+ 0xf000;
+ }
+}
+
+void I422ToARGB1555Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb1555,
+ int width) {
+ uint8 b0;
+ uint8 g0;
+ uint8 r0;
+ uint8 b1;
+ uint8 g1;
+ uint8 r1;
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0);
+ YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1);
+ b0 = b0 >> 3;
+ g0 = g0 >> 3;
+ r0 = r0 >> 3;
+ b1 = b1 >> 3;
+ g1 = g1 >> 3;
+ r1 = r1 >> 3;
+ *(uint32*)(dst_argb1555) = b0 | (g0 << 5) | (r0 << 10) |
+ (b1 << 16) | (g1 << 21) | (r1 << 26) | 0x80008000;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ dst_argb1555 += 4; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0);
+ b0 = b0 >> 3;
+ g0 = g0 >> 3;
+ r0 = r0 >> 3;
+ *(uint16*)(dst_argb1555) = b0 | (g0 << 5) | (r0 << 10) |
+ 0x8000;
+ }
+}
+
+void I422ToRGB565Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb565,
+ int width) {
+ uint8 b0;
+ uint8 g0;
+ uint8 r0;
+ uint8 b1;
+ uint8 g1;
+ uint8 r1;
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0);
+ YuvPixel(src_y[1], src_u[0], src_v[0], &b1, &g1, &r1);
+ b0 = b0 >> 3;
+ g0 = g0 >> 2;
+ r0 = r0 >> 3;
+ b1 = b1 >> 3;
+ g1 = g1 >> 2;
+ r1 = r1 >> 3;
+ *(uint32*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
+ (b1 << 16) | (g1 << 21) | (r1 << 27);
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ dst_rgb565 += 4; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0], &b0, &g0, &r0);
+ b0 = b0 >> 3;
+ g0 = g0 >> 2;
+ r0 = r0 >> 3;
+ *(uint16*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
+ }
+}
+
+void I411ToARGBRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 3; x += 4) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YuvPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ YuvPixel(src_y[2], src_u[0], src_v[0],
+ rgb_buf + 8, rgb_buf + 9, rgb_buf + 10);
+ rgb_buf[11] = 255;
+ YuvPixel(src_y[3], src_u[0], src_v[0],
+ rgb_buf + 12, rgb_buf + 13, rgb_buf + 14);
+ rgb_buf[15] = 255;
+ src_y += 4;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 16; // Advance 4 pixels.
+ }
+ if (width & 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YuvPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ src_y += 2;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ }
+}
+
+void NV12ToARGBRow_C(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_uv[0], src_uv[1],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YuvPixel(src_y[1], src_uv[0], src_uv[1],
+ rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ src_y += 2;
+ src_uv += 2;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_uv[0], src_uv[1],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ }
+}
+
+void NV21ToARGBRow_C(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_vu[1], src_vu[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+
+ YuvPixel(src_y[1], src_vu[1], src_vu[0],
+ rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+
+ src_y += 2;
+ src_vu += 2;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_vu[1], src_vu[0],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ }
+}
+
+void NV12ToRGB565Row_C(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_rgb565,
+ int width) {
+ uint8 b0;
+ uint8 g0;
+ uint8 r0;
+ uint8 b1;
+ uint8 g1;
+ uint8 r1;
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_uv[0], src_uv[1], &b0, &g0, &r0);
+ YuvPixel(src_y[1], src_uv[0], src_uv[1], &b1, &g1, &r1);
+ b0 = b0 >> 3;
+ g0 = g0 >> 2;
+ r0 = r0 >> 3;
+ b1 = b1 >> 3;
+ g1 = g1 >> 2;
+ r1 = r1 >> 3;
+ *(uint32*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
+ (b1 << 16) | (g1 << 21) | (r1 << 27);
+ src_y += 2;
+ src_uv += 2;
+ dst_rgb565 += 4; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_uv[0], src_uv[1], &b0, &g0, &r0);
+ b0 = b0 >> 3;
+ g0 = g0 >> 2;
+ r0 = r0 >> 3;
+ *(uint16*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
+ }
+}
+
+void NV21ToRGB565Row_C(const uint8* src_y,
+ const uint8* src_vu,
+ uint8* dst_rgb565,
+ int width) {
+ uint8 b0;
+ uint8 g0;
+ uint8 r0;
+ uint8 b1;
+ uint8 g1;
+ uint8 r1;
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_vu[1], src_vu[0], &b0, &g0, &r0);
+ YuvPixel(src_y[1], src_vu[1], src_vu[0], &b1, &g1, &r1);
+ b0 = b0 >> 3;
+ g0 = g0 >> 2;
+ r0 = r0 >> 3;
+ b1 = b1 >> 3;
+ g1 = g1 >> 2;
+ r1 = r1 >> 3;
+ *(uint32*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11) |
+ (b1 << 16) | (g1 << 21) | (r1 << 27);
+ src_y += 2;
+ src_vu += 2;
+ dst_rgb565 += 4; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_vu[1], src_vu[0], &b0, &g0, &r0);
+ b0 = b0 >> 3;
+ g0 = g0 >> 2;
+ r0 = r0 >> 3;
+ *(uint16*)(dst_rgb565) = b0 | (g0 << 5) | (r0 << 11);
+ }
+}
+
+void YUY2ToARGBRow_C(const uint8* src_yuy2,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_yuy2[0], src_yuy2[1], src_yuy2[3],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YuvPixel(src_yuy2[2], src_yuy2[1], src_yuy2[3],
+ rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ src_yuy2 += 4;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_yuy2[0], src_yuy2[1], src_yuy2[3],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ }
+}
+
+void UYVYToARGBRow_C(const uint8* src_uyvy,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_uyvy[1], src_uyvy[0], src_uyvy[2],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YuvPixel(src_uyvy[3], src_uyvy[0], src_uyvy[2],
+ rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ src_uyvy += 4;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_uyvy[1], src_uyvy[0], src_uyvy[2],
+ rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ }
+}
+
+void I422ToBGRARow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 3, rgb_buf + 2, rgb_buf + 1);
+ rgb_buf[0] = 255;
+ YuvPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 7, rgb_buf + 6, rgb_buf + 5);
+ rgb_buf[4] = 255;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 3, rgb_buf + 2, rgb_buf + 1);
+ rgb_buf[0] = 255;
+ }
+}
+
+void I422ToABGRRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 2, rgb_buf + 1, rgb_buf + 0);
+ rgb_buf[3] = 255;
+ YuvPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 6, rgb_buf + 5, rgb_buf + 4);
+ rgb_buf[7] = 255;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 2, rgb_buf + 1, rgb_buf + 0);
+ rgb_buf[3] = 255;
+ }
+}
+
+void I422ToRGBARow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* rgb_buf,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 1, rgb_buf + 2, rgb_buf + 3);
+ rgb_buf[0] = 255;
+ YuvPixel(src_y[1], src_u[0], src_v[0],
+ rgb_buf + 5, rgb_buf + 6, rgb_buf + 7);
+ rgb_buf[4] = 255;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YuvPixel(src_y[0], src_u[0], src_v[0],
+ rgb_buf + 1, rgb_buf + 2, rgb_buf + 3);
+ rgb_buf[0] = 255;
+ }
+}
+
+void I400ToARGBRow_C(const uint8* src_y, uint8* rgb_buf, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ YPixel(src_y[0], rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ YPixel(src_y[1], rgb_buf + 4, rgb_buf + 5, rgb_buf + 6);
+ rgb_buf[7] = 255;
+ src_y += 2;
+ rgb_buf += 8; // Advance 2 pixels.
+ }
+ if (width & 1) {
+ YPixel(src_y[0], rgb_buf + 0, rgb_buf + 1, rgb_buf + 2);
+ rgb_buf[3] = 255;
+ }
+}
+
+void MirrorRow_C(const uint8* src, uint8* dst, int width) {
+ int x;
+ src += width - 1;
+ for (x = 0; x < width - 1; x += 2) {
+ dst[x] = src[0];
+ dst[x + 1] = src[-1];
+ src -= 2;
+ }
+ if (width & 1) {
+ dst[width - 1] = src[0];
+ }
+}
+
+void MirrorUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width) {
+ int x;
+ src_uv += (width - 1) << 1;
+ for (x = 0; x < width - 1; x += 2) {
+ dst_u[x] = src_uv[0];
+ dst_u[x + 1] = src_uv[-2];
+ dst_v[x] = src_uv[1];
+ dst_v[x + 1] = src_uv[-2 + 1];
+ src_uv -= 4;
+ }
+ if (width & 1) {
+ dst_u[width - 1] = src_uv[0];
+ dst_v[width - 1] = src_uv[1];
+ }
+}
+
+void ARGBMirrorRow_C(const uint8* src, uint8* dst, int width) {
+ int x;
+ const uint32* src32 = (const uint32*)(src);
+ uint32* dst32 = (uint32*)(dst);
+ src32 += width - 1;
+ for (x = 0; x < width - 1; x += 2) {
+ dst32[x] = src32[0];
+ dst32[x + 1] = src32[-1];
+ src32 -= 2;
+ }
+ if (width & 1) {
+ dst32[width - 1] = src32[0];
+ }
+}
+
+void SplitUVRow_C(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ dst_u[x] = src_uv[0];
+ dst_u[x + 1] = src_uv[2];
+ dst_v[x] = src_uv[1];
+ dst_v[x + 1] = src_uv[3];
+ src_uv += 4;
+ }
+ if (width & 1) {
+ dst_u[width - 1] = src_uv[0];
+ dst_v[width - 1] = src_uv[1];
+ }
+}
+
+void MergeUVRow_C(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ dst_uv[0] = src_u[x];
+ dst_uv[1] = src_v[x];
+ dst_uv[2] = src_u[x + 1];
+ dst_uv[3] = src_v[x + 1];
+ dst_uv += 4;
+ }
+ if (width & 1) {
+ dst_uv[0] = src_u[width - 1];
+ dst_uv[1] = src_v[width - 1];
+ }
+}
+
+void CopyRow_C(const uint8* src, uint8* dst, int count) {
+ memcpy(dst, src, count);
+}
+
+void CopyRow_16_C(const uint16* src, uint16* dst, int count) {
+ memcpy(dst, src, count * 2);
+}
+
+void SetRow_C(uint8* dst, uint8 v8, int width) {
+ memset(dst, v8, width);
+}
+
+void ARGBSetRow_C(uint8* dst_argb, uint32 v32, int width) {
+ uint32* d = (uint32*)(dst_argb);
+ int x;
+ for (x = 0; x < width; ++x) {
+ d[x] = v32;
+ }
+}
+
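+// Byte layout of the packed 4:2:2 formats handled below (2 pixels / 4 bytes):
+//   YUY2: Y0 U0 Y1 V0 | Y2 U1 Y3 V1 | ...
+//   UYVY: U0 Y0 V0 Y1 | U1 Y2 V1 Y3 | ...
+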
+// Filter 2 rows of YUY2 UV's (422) into U and V (420).
+void YUY2ToUVRow_C(const uint8* src_yuy2, int src_stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int width) {
+ // Output a row of UV values, filtering 2 rows of YUY2.
+ int x;
+ for (x = 0; x < width; x += 2) {
+ dst_u[0] = (src_yuy2[1] + src_yuy2[src_stride_yuy2 + 1] + 1) >> 1;
+ dst_v[0] = (src_yuy2[3] + src_yuy2[src_stride_yuy2 + 3] + 1) >> 1;
+ src_yuy2 += 4;
+ dst_u += 1;
+ dst_v += 1;
+ }
+}
+
+// Copy row of YUY2 UV's (422) into U and V (422).
+void YUY2ToUV422Row_C(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int width) {
+ // Output a row of UV values.
+ int x;
+ for (x = 0; x < width; x += 2) {
+ dst_u[0] = src_yuy2[1];
+ dst_v[0] = src_yuy2[3];
+ src_yuy2 += 4;
+ dst_u += 1;
+ dst_v += 1;
+ }
+}
+
+// Copy row of YUY2 Y's (422) into Y (420/422).
+void YUY2ToYRow_C(const uint8* src_yuy2, uint8* dst_y, int width) {
+ // Output a row of Y values.
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ dst_y[x] = src_yuy2[0];
+ dst_y[x + 1] = src_yuy2[2];
+ src_yuy2 += 4;
+ }
+ if (width & 1) {
+ dst_y[width - 1] = src_yuy2[0];
+ }
+}
+
+// Filter 2 rows of UYVY UV's (422) into U and V (420).
+void UYVYToUVRow_C(const uint8* src_uyvy, int src_stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int width) {
+ // Output a row of UV values.
+ int x;
+ for (x = 0; x < width; x += 2) {
+ dst_u[0] = (src_uyvy[0] + src_uyvy[src_stride_uyvy + 0] + 1) >> 1;
+ dst_v[0] = (src_uyvy[2] + src_uyvy[src_stride_uyvy + 2] + 1) >> 1;
+ src_uyvy += 4;
+ dst_u += 1;
+ dst_v += 1;
+ }
+}
+
+// Copy row of UYVY UV's (422) into U and V (422).
+void UYVYToUV422Row_C(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int width) {
+ // Output a row of UV values.
+ int x;
+ for (x = 0; x < width; x += 2) {
+ dst_u[0] = src_uyvy[0];
+ dst_v[0] = src_uyvy[2];
+ src_uyvy += 4;
+ dst_u += 1;
+ dst_v += 1;
+ }
+}
+
+// Copy row of UYVY Y's (422) into Y (420/422).
+void UYVYToYRow_C(const uint8* src_uyvy, uint8* dst_y, int width) {
+ // Output a row of Y values.
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ dst_y[x] = src_uyvy[1];
+ dst_y[x + 1] = src_uyvy[3];
+ src_uyvy += 4;
+ }
+ if (width & 1) {
+ dst_y[width - 1] = src_uyvy[1];
+ }
+}
+
+#define BLEND(f, b, a) ((((256 - (a)) * (b)) >> 8) + (f))
+
+// Blend src_argb0 over src_argb1 and store to dst_argb.
+// dst_argb may be src_argb0 or src_argb1.
+// This code mimics the SSSE3 version for better testability.
+void ARGBBlendRow_C(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ uint32 fb = src_argb0[0];
+ uint32 fg = src_argb0[1];
+ uint32 fr = src_argb0[2];
+ uint32 a = src_argb0[3];
+ uint32 bb = src_argb1[0];
+ uint32 bg = src_argb1[1];
+ uint32 br = src_argb1[2];
+ dst_argb[0] = BLEND(fb, bb, a);
+ dst_argb[1] = BLEND(fg, bg, a);
+ dst_argb[2] = BLEND(fr, br, a);
+ dst_argb[3] = 255u;
+
+ fb = src_argb0[4 + 0];
+ fg = src_argb0[4 + 1];
+ fr = src_argb0[4 + 2];
+ a = src_argb0[4 + 3];
+ bb = src_argb1[4 + 0];
+ bg = src_argb1[4 + 1];
+ br = src_argb1[4 + 2];
+ dst_argb[4 + 0] = BLEND(fb, bb, a);
+ dst_argb[4 + 1] = BLEND(fg, bg, a);
+ dst_argb[4 + 2] = BLEND(fr, br, a);
+ dst_argb[4 + 3] = 255u;
+ src_argb0 += 8;
+ src_argb1 += 8;
+ dst_argb += 8;
+ }
+
+ if (width & 1) {
+ uint32 fb = src_argb0[0];
+ uint32 fg = src_argb0[1];
+ uint32 fr = src_argb0[2];
+ uint32 a = src_argb0[3];
+ uint32 bb = src_argb1[0];
+ uint32 bg = src_argb1[1];
+ uint32 br = src_argb1[2];
+ dst_argb[0] = BLEND(fb, bb, a);
+ dst_argb[1] = BLEND(fg, bg, a);
+ dst_argb[2] = BLEND(fr, br, a);
+ dst_argb[3] = 255u;
+ }
+}
+#undef BLEND
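+
+// Note: BLEND adds the foreground channel directly, so it assumes the
+// foreground ARGB is premultiplied (attenuated). For a = 0 the result is
+// ((256 - 0) * b) >> 8 = b exactly; for a = 255 it is (b >> 8) + f = f.
+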
+#define ATTENUATE(f, a) ((((a) | ((a) << 8)) * ((f) | ((f) << 8))) >> 24)
+
+// Multiply source RGB by alpha and store to destination.
+// This code mimics the SSSE3 version for better testability.
+void ARGBAttenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
+ int i;
+ for (i = 0; i < width - 1; i += 2) {
+ uint32 b = src_argb[0];
+ uint32 g = src_argb[1];
+ uint32 r = src_argb[2];
+ uint32 a = src_argb[3];
+ dst_argb[0] = ATTENUATE(b, a);
+ dst_argb[1] = ATTENUATE(g, a);
+ dst_argb[2] = ATTENUATE(r, a);
+ dst_argb[3] = a;
+ b = src_argb[4];
+ g = src_argb[5];
+ r = src_argb[6];
+ a = src_argb[7];
+ dst_argb[4] = ATTENUATE(b, a);
+ dst_argb[5] = ATTENUATE(g, a);
+ dst_argb[6] = ATTENUATE(r, a);
+ dst_argb[7] = a;
+ src_argb += 8;
+ dst_argb += 8;
+ }
+
+ if (width & 1) {
+ const uint32 b = src_argb[0];
+ const uint32 g = src_argb[1];
+ const uint32 r = src_argb[2];
+ const uint32 a = src_argb[3];
+ dst_argb[0] = ATTENUATE(b, a);
+ dst_argb[1] = ATTENUATE(g, a);
+ dst_argb[2] = ATTENUATE(r, a);
+ dst_argb[3] = a;
+ }
+}
+#undef ATTENUATE
+
+// Divide source RGB by alpha and store to destination.
+// b = (b * 255 + (a / 2)) / a;
+// g = (g * 255 + (a / 2)) / a;
+// r = (r * 255 + (a / 2)) / a;
+// The reciprocal method is off by 1 for some values, e.g. 125.
+// 8.8 fixed point inverse table with 1.0 in upper short and 1 / a in lower.
+#define T(a) (0x01000000 + (0x10000 / (a)))
+const uint32 fixed_invtbl8[256] = {
+ 0x01000000, 0x0100ffff, T(0x02), T(0x03), T(0x04), T(0x05), T(0x06), T(0x07),
+ T(0x08), T(0x09), T(0x0a), T(0x0b), T(0x0c), T(0x0d), T(0x0e), T(0x0f),
+ T(0x10), T(0x11), T(0x12), T(0x13), T(0x14), T(0x15), T(0x16), T(0x17),
+ T(0x18), T(0x19), T(0x1a), T(0x1b), T(0x1c), T(0x1d), T(0x1e), T(0x1f),
+ T(0x20), T(0x21), T(0x22), T(0x23), T(0x24), T(0x25), T(0x26), T(0x27),
+ T(0x28), T(0x29), T(0x2a), T(0x2b), T(0x2c), T(0x2d), T(0x2e), T(0x2f),
+ T(0x30), T(0x31), T(0x32), T(0x33), T(0x34), T(0x35), T(0x36), T(0x37),
+ T(0x38), T(0x39), T(0x3a), T(0x3b), T(0x3c), T(0x3d), T(0x3e), T(0x3f),
+ T(0x40), T(0x41), T(0x42), T(0x43), T(0x44), T(0x45), T(0x46), T(0x47),
+ T(0x48), T(0x49), T(0x4a), T(0x4b), T(0x4c), T(0x4d), T(0x4e), T(0x4f),
+ T(0x50), T(0x51), T(0x52), T(0x53), T(0x54), T(0x55), T(0x56), T(0x57),
+ T(0x58), T(0x59), T(0x5a), T(0x5b), T(0x5c), T(0x5d), T(0x5e), T(0x5f),
+ T(0x60), T(0x61), T(0x62), T(0x63), T(0x64), T(0x65), T(0x66), T(0x67),
+ T(0x68), T(0x69), T(0x6a), T(0x6b), T(0x6c), T(0x6d), T(0x6e), T(0x6f),
+ T(0x70), T(0x71), T(0x72), T(0x73), T(0x74), T(0x75), T(0x76), T(0x77),
+ T(0x78), T(0x79), T(0x7a), T(0x7b), T(0x7c), T(0x7d), T(0x7e), T(0x7f),
+ T(0x80), T(0x81), T(0x82), T(0x83), T(0x84), T(0x85), T(0x86), T(0x87),
+ T(0x88), T(0x89), T(0x8a), T(0x8b), T(0x8c), T(0x8d), T(0x8e), T(0x8f),
+ T(0x90), T(0x91), T(0x92), T(0x93), T(0x94), T(0x95), T(0x96), T(0x97),
+ T(0x98), T(0x99), T(0x9a), T(0x9b), T(0x9c), T(0x9d), T(0x9e), T(0x9f),
+ T(0xa0), T(0xa1), T(0xa2), T(0xa3), T(0xa4), T(0xa5), T(0xa6), T(0xa7),
+ T(0xa8), T(0xa9), T(0xaa), T(0xab), T(0xac), T(0xad), T(0xae), T(0xaf),
+ T(0xb0), T(0xb1), T(0xb2), T(0xb3), T(0xb4), T(0xb5), T(0xb6), T(0xb7),
+ T(0xb8), T(0xb9), T(0xba), T(0xbb), T(0xbc), T(0xbd), T(0xbe), T(0xbf),
+ T(0xc0), T(0xc1), T(0xc2), T(0xc3), T(0xc4), T(0xc5), T(0xc6), T(0xc7),
+ T(0xc8), T(0xc9), T(0xca), T(0xcb), T(0xcc), T(0xcd), T(0xce), T(0xcf),
+ T(0xd0), T(0xd1), T(0xd2), T(0xd3), T(0xd4), T(0xd5), T(0xd6), T(0xd7),
+ T(0xd8), T(0xd9), T(0xda), T(0xdb), T(0xdc), T(0xdd), T(0xde), T(0xdf),
+ T(0xe0), T(0xe1), T(0xe2), T(0xe3), T(0xe4), T(0xe5), T(0xe6), T(0xe7),
+ T(0xe8), T(0xe9), T(0xea), T(0xeb), T(0xec), T(0xed), T(0xee), T(0xef),
+ T(0xf0), T(0xf1), T(0xf2), T(0xf3), T(0xf4), T(0xf5), T(0xf6), T(0xf7),
+ T(0xf8), T(0xf9), T(0xfa), T(0xfb), T(0xfc), T(0xfd), T(0xfe), 0x01000100 };
+#undef T
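+
+// Worked example: for a = 128, fixed_invtbl8[128] & 0xffff =
+// 0x10000 / 128 = 512 (2.0 in 8.8 fixed point), so a premultiplied channel
+// value of 100 restores to (100 * 512) >> 8 = 200.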
+
+void ARGBUnattenuateRow_C(const uint8* src_argb, uint8* dst_argb, int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ uint32 b = src_argb[0];
+ uint32 g = src_argb[1];
+ uint32 r = src_argb[2];
+ const uint32 a = src_argb[3];
+ const uint32 ia = fixed_invtbl8[a] & 0xffff; // 8.8 fixed point
+ b = (b * ia) >> 8;
+ g = (g * ia) >> 8;
+ r = (r * ia) >> 8;
+ // Clamping should not be necessary but is free in assembly.
+ dst_argb[0] = clamp255(b);
+ dst_argb[1] = clamp255(g);
+ dst_argb[2] = clamp255(r);
+ dst_argb[3] = a;
+ src_argb += 4;
+ dst_argb += 4;
+ }
+}
+
+void ComputeCumulativeSumRow_C(const uint8* row, int32* cumsum,
+ const int32* previous_cumsum, int width) {
+ int32 row_sum[4] = {0, 0, 0, 0};
+ int x;
+ for (x = 0; x < width; ++x) {
+ row_sum[0] += row[x * 4 + 0];
+ row_sum[1] += row[x * 4 + 1];
+ row_sum[2] += row[x * 4 + 2];
+ row_sum[3] += row[x * 4 + 3];
+ cumsum[x * 4 + 0] = row_sum[0] + previous_cumsum[x * 4 + 0];
+ cumsum[x * 4 + 1] = row_sum[1] + previous_cumsum[x * 4 + 1];
+ cumsum[x * 4 + 2] = row_sum[2] + previous_cumsum[x * 4 + 2];
+ cumsum[x * 4 + 3] = row_sum[3] + previous_cumsum[x * 4 + 3];
+ }
+}
+
+void CumulativeSumToAverageRow_C(const int32* tl, const int32* bl,
+ int w, int area, uint8* dst, int count) {
+ float ooa = 1.0f / area;
+ int i;
+ for (i = 0; i < count; ++i) {
+ dst[0] = (uint8)((bl[w + 0] + tl[0] - bl[0] - tl[w + 0]) * ooa);
+ dst[1] = (uint8)((bl[w + 1] + tl[1] - bl[1] - tl[w + 1]) * ooa);
+ dst[2] = (uint8)((bl[w + 2] + tl[2] - bl[2] - tl[w + 2]) * ooa);
+ dst[3] = (uint8)((bl[w + 3] + tl[3] - bl[3] - tl[w + 3]) * ooa);
+ dst += 4;
+ tl += 4;
+ bl += 4;
+ }
+}
+
+// Sample source pixels along an affine (u, v) walk and copy them to the destination row.
+LIBYUV_API
+void ARGBAffineRow_C(const uint8* src_argb, int src_argb_stride,
+ uint8* dst_argb, const float* uv_dudv, int width) {
+ int i;
+ // Render a row of pixels from source into a buffer.
+ float uv[2];
+ uv[0] = uv_dudv[0];
+ uv[1] = uv_dudv[1];
+ for (i = 0; i < width; ++i) {
+ int x = (int)(uv[0]);
+ int y = (int)(uv[1]);
+ *(uint32*)(dst_argb) =
+ *(const uint32*)(src_argb + y * src_argb_stride +
+ x * 4);
+ dst_argb += 4;
+ uv[0] += uv_dudv[2];
+ uv[1] += uv_dudv[3];
+ }
+}
+
+// Blend 2 rows into 1.
+static void HalfRow_C(const uint8* src_uv, int src_uv_stride,
+ uint8* dst_uv, int pix) {
+ int x;
+ for (x = 0; x < pix; ++x) {
+ dst_uv[x] = (src_uv[x] + src_uv[src_uv_stride + x] + 1) >> 1;
+ }
+}
+
+static void HalfRow_16_C(const uint16* src_uv, int src_uv_stride,
+ uint16* dst_uv, int pix) {
+ int x;
+ for (x = 0; x < pix; ++x) {
+ dst_uv[x] = (src_uv[x] + src_uv[src_uv_stride + x] + 1) >> 1;
+ }
+}
+
+// C version: blend 2 source rows into 1 destination row (2x2 -> 2x1).
+void InterpolateRow_C(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ int width, int source_y_fraction) {
+ int y1_fraction = source_y_fraction;
+ int y0_fraction = 256 - y1_fraction;
+ const uint8* src_ptr1 = src_ptr + src_stride;
+ int x;
+ if (source_y_fraction == 0) {
+ memcpy(dst_ptr, src_ptr, width);
+ return;
+ }
+ if (source_y_fraction == 128) {
+ HalfRow_C(src_ptr, (int)(src_stride), dst_ptr, width);
+ return;
+ }
+ for (x = 0; x < width - 1; x += 2) {
+ dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8;
+ dst_ptr[1] = (src_ptr[1] * y0_fraction + src_ptr1[1] * y1_fraction) >> 8;
+ src_ptr += 2;
+ src_ptr1 += 2;
+ dst_ptr += 2;
+ }
+ if (width & 1) {
+ dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8;
+ }
+}
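+
+// source_y_fraction selects the blend: 0 copies src_ptr, 128 averages the
+// two rows (with rounding, via HalfRow), and e.g. 64 gives a 75/25 blend:
+// dst = (src * 192 + src1 * 64) >> 8.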
+
+void InterpolateRow_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ ptrdiff_t src_stride,
+ int width, int source_y_fraction) {
+ int y1_fraction = source_y_fraction;
+ int y0_fraction = 256 - y1_fraction;
+ const uint16* src_ptr1 = src_ptr + src_stride;
+ int x;
+ if (source_y_fraction == 0) {
+ memcpy(dst_ptr, src_ptr, width * 2);
+ return;
+ }
+ if (source_y_fraction == 128) {
+ HalfRow_16_C(src_ptr, (int)(src_stride), dst_ptr, width);
+ return;
+ }
+ for (x = 0; x < width - 1; x += 2) {
+ dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8;
+ dst_ptr[1] = (src_ptr[1] * y0_fraction + src_ptr1[1] * y1_fraction) >> 8;
+ src_ptr += 2;
+ src_ptr1 += 2;
+ dst_ptr += 2;
+ }
+ if (width & 1) {
+ dst_ptr[0] = (src_ptr[0] * y0_fraction + src_ptr1[0] * y1_fraction) >> 8;
+ }
+}
+
+// Use first 4 shuffler values to reorder ARGB channels.
+void ARGBShuffleRow_C(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ int index0 = shuffler[0];
+ int index1 = shuffler[1];
+ int index2 = shuffler[2];
+ int index3 = shuffler[3];
+ // Shuffle a row of ARGB.
+ int x;
+ for (x = 0; x < pix; ++x) {
+ // Read all 4 bytes before writing so src and dst may alias (in-place conversion).
+ uint8 b = src_argb[index0];
+ uint8 g = src_argb[index1];
+ uint8 r = src_argb[index2];
+ uint8 a = src_argb[index3];
+ dst_argb[0] = b;
+ dst_argb[1] = g;
+ dst_argb[2] = r;
+ dst_argb[3] = a;
+ src_argb += 4;
+ dst_argb += 4;
+ }
+}
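+
+// Illustrative sketch (not upstream code): with shuffler {2, 1, 0, 3} the
+// row's R and B bytes are swapped, converting between ARGB and ABGR byte
+// order; in-place use is safe for the reason noted above.
+#ifdef LIBYUV_EXAMPLES  // Hypothetical guard; illustration only.
+static void SwapRAndBExample(uint8* row, int pix) {
+ static const uint8 kShuffleSwapRB[4] = {2u, 1u, 0u, 3u};
+ ARGBShuffleRow_C(row, row, kShuffleSwapRB, pix);
+}
+#endif  // LIBYUV_EXAMPLES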
+
+void I422ToYUY2Row_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_frame, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ dst_frame[0] = src_y[0];
+ dst_frame[1] = src_u[0];
+ dst_frame[2] = src_y[1];
+ dst_frame[3] = src_v[0];
+ dst_frame += 4;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ }
+ if (width & 1) {
+ dst_frame[0] = src_y[0];
+ dst_frame[1] = src_u[0];
+ dst_frame[2] = 0;
+ dst_frame[3] = src_v[0];
+ }
+}
+
+void I422ToUYVYRow_C(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_frame, int width) {
+ int x;
+ for (x = 0; x < width - 1; x += 2) {
+ dst_frame[0] = src_u[0];
+ dst_frame[1] = src_y[0];
+ dst_frame[2] = src_v[0];
+ dst_frame[3] = src_y[1];
+ dst_frame += 4;
+ src_y += 2;
+ src_u += 1;
+ src_v += 1;
+ }
+ if (width & 1) {
+ dst_frame[0] = src_u[0];
+ dst_frame[1] = src_y[0];
+ dst_frame[2] = src_v[0];
+ dst_frame[3] = 0;
+ }
+}
+
+// Maximum temporary width for wrappers to process at a time, in pixels.
+#define MAXTWIDTH 2048
+
+#if !(defined(_MSC_VER) && !defined(__clang__)) && \
+ defined(HAS_I422TORGB565ROW_SSSE3)
+// row_win.cc has an asm version, but GCC builds use a 2-step wrapper.
+void I422ToRGB565Row_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb565,
+ int width) {
+ SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, twidth);
+ ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth);
+ src_y += twidth;
+ src_u += twidth / 2;
+ src_v += twidth / 2;
+ dst_rgb565 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
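+
+// The wrappers above and below share one pattern: convert up to MAXTWIDTH
+// pixels into an aligned intermediate ARGB row with the fast YUV-to-ARGB
+// kernel, then pack that row to the narrower destination format, looping
+// until the full width is consumed.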
+
+#if defined(HAS_I422TOARGB1555ROW_SSSE3)
+void I422ToARGB1555Row_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb1555,
+ int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, twidth);
+ ARGBToARGB1555Row_SSE2(row, dst_argb1555, twidth);
+ src_y += twidth;
+ src_u += twidth / 2;
+ src_v += twidth / 2;
+ dst_argb1555 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_I422TOARGB4444ROW_SSSE3)
+void I422ToARGB4444Row_SSSE3(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb4444,
+ int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ I422ToARGBRow_SSSE3(src_y, src_u, src_v, row, twidth);
+ ARGBToARGB4444Row_SSE2(row, dst_argb4444, twidth);
+ src_y += twidth;
+ src_u += twidth / 2;
+ src_v += twidth / 2;
+ dst_argb4444 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_NV12TORGB565ROW_SSSE3)
+void NV12ToRGB565Row_SSSE3(const uint8* src_y, const uint8* src_uv,
+ uint8* dst_rgb565, int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ NV12ToARGBRow_SSSE3(src_y, src_uv, row, twidth);
+ ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth);
+ src_y += twidth;
+ src_uv += twidth;
+ dst_rgb565 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_NV21TORGB565ROW_SSSE3)
+void NV21ToRGB565Row_SSSE3(const uint8* src_y, const uint8* src_vu,
+ uint8* dst_rgb565, int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ NV21ToARGBRow_SSSE3(src_y, src_vu, row, twidth);
+ ARGBToRGB565Row_SSE2(row, dst_rgb565, twidth);
+ src_y += twidth;
+ src_vu += twidth;
+ dst_rgb565 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_YUY2TOARGBROW_SSSE3)
+void YUY2ToARGBRow_SSSE3(const uint8* src_yuy2, uint8* dst_argb, int width) {
+ // Row buffers for intermediate YUV pixels.
+ SIMD_ALIGNED(uint8 row_y[MAXTWIDTH]);
+ SIMD_ALIGNED(uint8 row_u[MAXTWIDTH / 2]);
+ SIMD_ALIGNED(uint8 row_v[MAXTWIDTH / 2]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ YUY2ToUV422Row_SSE2(src_yuy2, row_u, row_v, twidth);
+ YUY2ToYRow_SSE2(src_yuy2, row_y, twidth);
+ I422ToARGBRow_SSSE3(row_y, row_u, row_v, dst_argb, twidth);
+ src_yuy2 += twidth * 2;
+ dst_argb += twidth * 4;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_UYVYTOARGBROW_SSSE3)
+void UYVYToARGBRow_SSSE3(const uint8* src_uyvy, uint8* dst_argb, int width) {
+ // Row buffers for intermediate YUV pixels.
+ SIMD_ALIGNED(uint8 row_y[MAXTWIDTH]);
+ SIMD_ALIGNED(uint8 row_u[MAXTWIDTH / 2]);
+ SIMD_ALIGNED(uint8 row_v[MAXTWIDTH / 2]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ UYVYToUV422Row_SSE2(src_uyvy, row_u, row_v, twidth);
+ UYVYToYRow_SSE2(src_uyvy, row_y, twidth);
+ I422ToARGBRow_SSSE3(row_y, row_u, row_v, dst_argb, twidth);
+ src_uyvy += twidth * 2;
+ dst_argb += twidth * 4;
+ width -= twidth;
+ }
+}
+#endif  // HAS_UYVYTOARGBROW_SSSE3
+
+#if defined(HAS_I422TORGB565ROW_AVX2)
+void I422ToRGB565Row_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb565,
+ int width) {
+ SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth);
+ ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth);
+ src_y += twidth;
+ src_u += twidth / 2;
+ src_v += twidth / 2;
+ dst_rgb565 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_I422TOARGB1555ROW_AVX2)
+void I422ToARGB1555Row_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb1555,
+ int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth);
+ ARGBToARGB1555Row_AVX2(row, dst_argb1555, twidth);
+ src_y += twidth;
+ src_u += twidth / 2;
+ src_v += twidth / 2;
+ dst_argb1555 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_I422TOARGB4444ROW_AVX2)
+void I422ToARGB4444Row_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb4444,
+ int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth);
+ ARGBToARGB4444Row_AVX2(row, dst_argb4444, twidth);
+ src_y += twidth;
+ src_u += twidth / 2;
+ src_v += twidth / 2;
+ dst_argb4444 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_I422TORGB24ROW_AVX2)
+void I422ToRGB24Row_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb24,
+ int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth);
+ // TODO(fbarchard): ARGBToRGB24Row_AVX2
+ ARGBToRGB24Row_SSSE3(row, dst_rgb24, twidth);
+ src_y += twidth;
+ src_u += twidth / 2;
+ src_v += twidth / 2;
+ dst_rgb24 += twidth * 3;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_I422TORAWROW_AVX2)
+void I422ToRAWRow_AVX2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_raw,
+ int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ I422ToARGBRow_AVX2(src_y, src_u, src_v, row, twidth);
+ // TODO(fbarchard): ARGBToRAWRow_AVX2
+ ARGBToRAWRow_SSSE3(row, dst_raw, twidth);
+ src_y += twidth;
+ src_u += twidth / 2;
+ src_v += twidth / 2;
+ dst_raw += twidth * 3;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_NV12TORGB565ROW_AVX2)
+void NV12ToRGB565Row_AVX2(const uint8* src_y, const uint8* src_uv,
+ uint8* dst_rgb565, int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ NV12ToARGBRow_AVX2(src_y, src_uv, row, twidth);
+ ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth);
+ src_y += twidth;
+ src_uv += twidth;
+ dst_rgb565 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_NV21TORGB565ROW_AVX2)
+void NV21ToRGB565Row_AVX2(const uint8* src_y, const uint8* src_vu,
+ uint8* dst_rgb565, int width) {
+ // Row buffer for intermediate ARGB pixels.
+ SIMD_ALIGNED32(uint8 row[MAXTWIDTH * 4]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ NV21ToARGBRow_AVX2(src_y, src_vu, row, twidth);
+ ARGBToRGB565Row_AVX2(row, dst_rgb565, twidth);
+ src_y += twidth;
+ src_vu += twidth;
+ dst_rgb565 += twidth * 2;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_YUY2TOARGBROW_AVX2)
+void YUY2ToARGBRow_AVX2(const uint8* src_yuy2, uint8* dst_argb, int width) {
+ // Row buffers for intermediate YUV pixels.
+ SIMD_ALIGNED32(uint8 row_y[MAXTWIDTH]);
+ SIMD_ALIGNED32(uint8 row_u[MAXTWIDTH / 2]);
+ SIMD_ALIGNED32(uint8 row_v[MAXTWIDTH / 2]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ YUY2ToUV422Row_AVX2(src_yuy2, row_u, row_v, twidth);
+ YUY2ToYRow_AVX2(src_yuy2, row_y, twidth);
+ I422ToARGBRow_AVX2(row_y, row_u, row_v, dst_argb, twidth);
+ src_yuy2 += twidth * 2;
+ dst_argb += twidth * 4;
+ width -= twidth;
+ }
+}
+#endif
+
+#if defined(HAS_UYVYTOARGBROW_AVX2)
+void UYVYToARGBRow_AVX2(const uint8* src_uyvy, uint8* dst_argb, int width) {
+ // Row buffers for intermediate YUV pixels.
+ SIMD_ALIGNED32(uint8 row_y[MAXTWIDTH]);
+ SIMD_ALIGNED32(uint8 row_u[MAXTWIDTH / 2]);
+ SIMD_ALIGNED32(uint8 row_v[MAXTWIDTH / 2]);
+ while (width > 0) {
+ int twidth = width > MAXTWIDTH ? MAXTWIDTH : width;
+ UYVYToUV422Row_AVX2(src_uyvy, row_u, row_v, twidth);
+ UYVYToYRow_AVX2(src_uyvy, row_y, twidth);
+ I422ToARGBRow_AVX2(row_y, row_u, row_v, dst_argb, twidth);
+ src_uyvy += twidth * 2;
+ dst_argb += twidth * 4;
+ width -= twidth;
+ }
+}
+#endif  // HAS_UYVYTOARGBROW_AVX2
+
+void ARGBPolynomialRow_C(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width) {
+ int i;
+ for (i = 0; i < width; ++i) {
+ float b = (float)(src_argb[0]);
+ float g = (float)(src_argb[1]);
+ float r = (float)(src_argb[2]);
+ float a = (float)(src_argb[3]);
+ float b2 = b * b;
+ float g2 = g * g;
+ float r2 = r * r;
+ float a2 = a * a;
+ float db = poly[0] + poly[4] * b;
+ float dg = poly[1] + poly[5] * g;
+ float dr = poly[2] + poly[6] * r;
+ float da = poly[3] + poly[7] * a;
+ float b3 = b2 * b;
+ float g3 = g2 * g;
+ float r3 = r2 * r;
+ float a3 = a2 * a;
+ db += poly[8] * b2;
+ dg += poly[9] * g2;
+ dr += poly[10] * r2;
+ da += poly[11] * a2;
+ db += poly[12] * b3;
+ dg += poly[13] * g3;
+ dr += poly[14] * r3;
+ da += poly[15] * a3;
+
+ dst_argb[0] = Clamp((int32)(db));
+ dst_argb[1] = Clamp((int32)(dg));
+ dst_argb[2] = Clamp((int32)(dr));
+ dst_argb[3] = Clamp((int32)(da));
+ src_argb += 4;
+ dst_argb += 4;
+ }
+}
+
+void ARGBLumaColorTableRow_C(const uint8* src_argb, uint8* dst_argb, int width,
+ const uint8* luma, uint32 lumacoeff) {
+ uint32 bc = lumacoeff & 0xff;
+ uint32 gc = (lumacoeff >> 8) & 0xff;
+ uint32 rc = (lumacoeff >> 16) & 0xff;
+
+ int i;
+ for (i = 0; i < width - 1; i += 2) {
+ // Luminance in rows, color values in columns.
+ const uint8* luma0 = ((src_argb[0] * bc + src_argb[1] * gc +
+ src_argb[2] * rc) & 0x7F00u) + luma;
+ const uint8* luma1;
+ dst_argb[0] = luma0[src_argb[0]];
+ dst_argb[1] = luma0[src_argb[1]];
+ dst_argb[2] = luma0[src_argb[2]];
+ dst_argb[3] = src_argb[3];
+ luma1 = ((src_argb[4] * bc + src_argb[5] * gc +
+ src_argb[6] * rc) & 0x7F00u) + luma;
+ dst_argb[4] = luma1[src_argb[4]];
+ dst_argb[5] = luma1[src_argb[5]];
+ dst_argb[6] = luma1[src_argb[6]];
+ dst_argb[7] = src_argb[7];
+ src_argb += 8;
+ dst_argb += 8;
+ }
+ if (width & 1) {
+ // Luminance in rows, color values in columns.
+ const uint8* luma0 = ((src_argb[0] * bc + src_argb[1] * gc +
+ src_argb[2] * rc) & 0x7F00u) + luma;
+ dst_argb[0] = luma0[src_argb[0]];
+ dst_argb[1] = luma0[src_argb[1]];
+ dst_argb[2] = luma0[src_argb[2]];
+ dst_argb[3] = src_argb[3];
+ }
+}
+
+void ARGBCopyAlphaRow_C(const uint8* src, uint8* dst, int width) {
+ int i;
+ for (i = 0; i < width - 1; i += 2) {
+ dst[3] = src[3];
+ dst[7] = src[7];
+ dst += 8;
+ src += 8;
+ }
+ if (width & 1) {
+ dst[3] = src[3];
+ }
+}
+
+void ARGBCopyYToAlphaRow_C(const uint8* src, uint8* dst, int width) {
+ int i;
+ for (i = 0; i < width - 1; i += 2) {
+ dst[3] = src[0];
+ dst[7] = src[1];
+ dst += 8;
+ src += 2;
+ }
+ if (width & 1) {
+ dst[3] = src[0];
+ }
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/row_gcc.cc b/media/libaom/src/third_party/libyuv/source/row_gcc.cc
new file mode 100644
index 000000000..820de0a1c
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/row_gcc.cc
@@ -0,0 +1,5475 @@
+// VERSION 2
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC x86 and x64.
+#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))
+
+#if defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_ARGBGRAYROW_SSSE3)
+
+// Constants for ARGB
+static vec8 kARGBToY = {
+ 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0
+};
+
+// JPEG full range.
+static vec8 kARGBToYJ = {
+ 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0
+};
+#endif // defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_ARGBGRAYROW_SSSE3)
+
+#if defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_I422TOARGBROW_SSSE3)
+
+static vec8 kARGBToU = {
+ 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0
+};
+
+static vec8 kARGBToUJ = {
+ 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0
+};
+
+static vec8 kARGBToV = {
+ -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
+};
+
+static vec8 kARGBToVJ = {
+ -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0
+};
+
+// Constants for BGRA
+static vec8 kBGRAToY = {
+ 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13
+};
+
+static vec8 kBGRAToU = {
+ 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112
+};
+
+static vec8 kBGRAToV = {
+ 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18
+};
+
+// Constants for ABGR
+static vec8 kABGRToY = {
+ 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0
+};
+
+static vec8 kABGRToU = {
+ -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0
+};
+
+static vec8 kABGRToV = {
+ 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0
+};
+
+// Constants for RGBA.
+static vec8 kRGBAToY = {
+ 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33
+};
+
+static vec8 kRGBAToU = {
+ 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38
+};
+
+static vec8 kRGBAToV = {
+ 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112
+};
+
+static uvec8 kAddY16 = {
+ 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u
+};
+
+// 0.5 in 7-bit fixed point (64).
+static vec16 kAddYJ64 = {
+ 64, 64, 64, 64, 64, 64, 64, 64
+};
+
+static uvec8 kAddUV128 = {
+ 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
+ 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
+};
+
+static uvec16 kAddUVJ128 = {
+ 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u
+};
+#endif // defined(HAS_ARGBTOYROW_SSSE3) || defined(HAS_I422TOARGBROW_SSSE3)
+
+#ifdef HAS_RGB24TOARGBROW_SSSE3
+
+// Shuffle table for converting RGB24 to ARGB.
+static uvec8 kShuffleMaskRGB24ToARGB = {
+ 0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u, 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u
+};
+
+// Shuffle table for converting RAW to ARGB.
+static uvec8 kShuffleMaskRAWToARGB = {
+ 2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u
+};
+
+// Shuffle table for converting ARGB to RGB24.
+static uvec8 kShuffleMaskARGBToRGB24 = {
+ 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u
+};
+
+// Shuffle table for converting ARGB to RAW.
+static uvec8 kShuffleMaskARGBToRAW = {
+ 2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
+};
+
+// Shuffle table for converting ARGB to RGB24 for I422ToRGB24: first 8 bytes, then the next 4.
+static uvec8 kShuffleMaskARGBToRGB24_0 = {
+ 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u
+};
+
+// Shuffle table for converting ARGB to RAW for I422ToRAW: first 8 bytes, then the next 4.
+static uvec8 kShuffleMaskARGBToRAW_0 = {
+ 2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u
+};
+#endif // HAS_RGB24TOARGBROW_SSSE3
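+
+// In the masks above, each byte selects a source byte for pshufb; a value of
+// 128u has its high bit set, which makes pshufb write zero to that output
+// byte (used to blank alpha lanes before "por" sets them to 0xff, and to
+// pad the tail of the RGB24/RAW masks).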
+
+#if defined(TESTING) && defined(__x86_64__)
+void TestRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
+ asm volatile (
+ ".p2align 5 \n"
+ "mov %%eax,%%eax \n"
+ "mov %%ebx,%%ebx \n"
+ "mov %%ecx,%%ecx \n"
+ "mov %%edx,%%edx \n"
+ "mov %%esi,%%esi \n"
+ "mov %%edi,%%edi \n"
+ "mov %%ebp,%%ebp \n"
+ "mov %%esp,%%esp \n"
+ ".p2align 5 \n"
+ "mov %%r8d,%%r8d \n"
+ "mov %%r9d,%%r9d \n"
+ "mov %%r10d,%%r10d \n"
+ "mov %%r11d,%%r11d \n"
+ "mov %%r12d,%%r12d \n"
+ "mov %%r13d,%%r13d \n"
+ "mov %%r14d,%%r14d \n"
+ "mov %%r15d,%%r15d \n"
+ ".p2align 5 \n"
+ "lea (%%rax),%%eax \n"
+ "lea (%%rbx),%%ebx \n"
+ "lea (%%rcx),%%ecx \n"
+ "lea (%%rdx),%%edx \n"
+ "lea (%%rsi),%%esi \n"
+ "lea (%%rdi),%%edi \n"
+ "lea (%%rbp),%%ebp \n"
+ "lea (%%rsp),%%esp \n"
+ ".p2align 5 \n"
+ "lea (%%r8),%%r8d \n"
+ "lea (%%r9),%%r9d \n"
+ "lea (%%r10),%%r10d \n"
+ "lea (%%r11),%%r11d \n"
+ "lea (%%r12),%%r12d \n"
+ "lea (%%r13),%%r13d \n"
+ "lea (%%r14),%%r14d \n"
+ "lea (%%r15),%%r15d \n"
+
+ ".p2align 5 \n"
+ "lea 0x10(%%rax),%%eax \n"
+ "lea 0x10(%%rbx),%%ebx \n"
+ "lea 0x10(%%rcx),%%ecx \n"
+ "lea 0x10(%%rdx),%%edx \n"
+ "lea 0x10(%%rsi),%%esi \n"
+ "lea 0x10(%%rdi),%%edi \n"
+ "lea 0x10(%%rbp),%%ebp \n"
+ "lea 0x10(%%rsp),%%esp \n"
+ ".p2align 5 \n"
+ "lea 0x10(%%r8),%%r8d \n"
+ "lea 0x10(%%r9),%%r9d \n"
+ "lea 0x10(%%r10),%%r10d \n"
+ "lea 0x10(%%r11),%%r11d \n"
+ "lea 0x10(%%r12),%%r12d \n"
+ "lea 0x10(%%r13),%%r13d \n"
+ "lea 0x10(%%r14),%%r14d \n"
+ "lea 0x10(%%r15),%%r15d \n"
+
+ ".p2align 5 \n"
+ "add 0x10,%%eax \n"
+ "add 0x10,%%ebx \n"
+ "add 0x10,%%ecx \n"
+ "add 0x10,%%edx \n"
+ "add 0x10,%%esi \n"
+ "add 0x10,%%edi \n"
+ "add 0x10,%%ebp \n"
+ "add 0x10,%%esp \n"
+ ".p2align 5 \n"
+ "add 0x10,%%r8d \n"
+ "add 0x10,%%r9d \n"
+ "add 0x10,%%r10d \n"
+ "add 0x10,%%r11d \n"
+ "add 0x10,%%r12d \n"
+ "add 0x10,%%r13d \n"
+ "add 0x10,%%r14d \n"
+ "add 0x10,%%r15d \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "movq " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "memory", "cc", "xmm0", "xmm1", "xmm5"
+ );
+}
+#endif // defined(TESTING) && defined(__x86_64__)
+
+#ifdef HAS_J400TOARGBROW_SSE2
+void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "pslld $0x18,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ "movq " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklwd %%xmm0,%%xmm0 \n"
+ "punpckhwd %%xmm1,%%xmm1 \n"
+ "por %%xmm5,%%xmm0 \n"
+ "por %%xmm5,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :: "memory", "cc", "xmm0", "xmm1", "xmm5"
+ );
+}
+#endif // HAS_J400TOARGBROW_SSE2
+
+#ifdef HAS_RGB24TOARGBROW_SSSE3
+void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n" // generate mask 0xff000000
+ "pslld $0x18,%%xmm5 \n"
+ "movdqa %3,%%xmm4 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x30,0) ",%0 \n"
+ "movdqa %%xmm3,%%xmm2 \n"
+ "palignr $0x8,%%xmm1,%%xmm2 \n"
+ "pshufb %%xmm4,%%xmm2 \n"
+ "por %%xmm5,%%xmm2 \n"
+ "palignr $0xc,%%xmm0,%%xmm1 \n"
+ "pshufb %%xmm4,%%xmm0 \n"
+ "movdqu %%xmm2," MEMACCESS2(0x20,1) " \n"
+ "por %%xmm5,%%xmm0 \n"
+ "pshufb %%xmm4,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "por %%xmm5,%%xmm1 \n"
+ "palignr $0x4,%%xmm3,%%xmm3 \n"
+ "pshufb %%xmm4,%%xmm3 \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "por %%xmm5,%%xmm3 \n"
+ "movdqu %%xmm3," MEMACCESS2(0x30,1) " \n"
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_rgb24), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ : "m"(kShuffleMaskRGB24ToARGB) // %3
+ : "memory", "cc" , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+
+void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n" // generate mask 0xff000000
+ "pslld $0x18,%%xmm5 \n"
+ "movdqa %3,%%xmm4 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x30,0) ",%0 \n"
+ "movdqa %%xmm3,%%xmm2 \n"
+ "palignr $0x8,%%xmm1,%%xmm2 \n"
+ "pshufb %%xmm4,%%xmm2 \n"
+ "por %%xmm5,%%xmm2 \n"
+ "palignr $0xc,%%xmm0,%%xmm1 \n"
+ "pshufb %%xmm4,%%xmm0 \n"
+ "movdqu %%xmm2," MEMACCESS2(0x20,1) " \n"
+ "por %%xmm5,%%xmm0 \n"
+ "pshufb %%xmm4,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "por %%xmm5,%%xmm1 \n"
+ "palignr $0x4,%%xmm3,%%xmm3 \n"
+ "pshufb %%xmm4,%%xmm3 \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "por %%xmm5,%%xmm3 \n"
+ "movdqu %%xmm3," MEMACCESS2(0x30,1) " \n"
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_raw), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ : "m"(kShuffleMaskRAWToARGB) // %3
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+
+void RGB565ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) {
+ asm volatile (
+ "mov $0x1080108,%%eax \n"
+ "movd %%eax,%%xmm5 \n"
+ "pshufd $0x0,%%xmm5,%%xmm5 \n"
+ "mov $0x20802080,%%eax \n"
+ "movd %%eax,%%xmm6 \n"
+ "pshufd $0x0,%%xmm6,%%xmm6 \n"
+ "pcmpeqb %%xmm3,%%xmm3 \n"
+ "psllw $0xb,%%xmm3 \n"
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "psllw $0xa,%%xmm4 \n"
+ "psrlw $0x5,%%xmm4 \n"
+ "pcmpeqb %%xmm7,%%xmm7 \n"
+ "psllw $0x8,%%xmm7 \n"
+ "sub %0,%1 \n"
+ "sub %0,%1 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "pand %%xmm3,%%xmm1 \n"
+ "psllw $0xb,%%xmm2 \n"
+ "pmulhuw %%xmm5,%%xmm1 \n"
+ "pmulhuw %%xmm5,%%xmm2 \n"
+ "psllw $0x8,%%xmm1 \n"
+ "por %%xmm2,%%xmm1 \n"
+ "pand %%xmm4,%%xmm0 \n"
+ "pmulhuw %%xmm6,%%xmm0 \n"
+ "por %%xmm7,%%xmm0 \n"
+ "movdqa %%xmm1,%%xmm2 \n"
+ "punpcklbw %%xmm0,%%xmm1 \n"
+ "punpckhbw %%xmm0,%%xmm2 \n"
+ MEMOPMEM(movdqu,xmm1,0x00,1,0,2) // movdqu %%xmm1,(%1,%0,2)
+ MEMOPMEM(movdqu,xmm2,0x10,1,0,2) // movdqu %%xmm2,0x10(%1,%0,2)
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(pix) // %2
+ :
+ : "memory", "cc", "eax", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+
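+// The 0x0108 and 0x2080 multipliers above expand 5- and 6-bit channels to
+// 8 bits by replicating the top bits: (v << 3) | (v >> 2) is v * 8.25, and
+// 0x0108 * 2048 / 65536 == 8.25. A scalar sketch (illustrative helpers, not
+// part of this file):
+static uint8 Expand5To8(uint8 v) { return (uint8)((v << 3) | (v >> 2)); }
+static uint8 Expand6To8(uint8 v) { return (uint8)((v << 2) | (v >> 4)); }
+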
+void ARGB1555ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) {
+ asm volatile (
+ "mov $0x1080108,%%eax \n"
+ "movd %%eax,%%xmm5 \n"
+ "pshufd $0x0,%%xmm5,%%xmm5 \n"
+ "mov $0x42004200,%%eax \n"
+ "movd %%eax,%%xmm6 \n"
+ "pshufd $0x0,%%xmm6,%%xmm6 \n"
+ "pcmpeqb %%xmm3,%%xmm3 \n"
+ "psllw $0xb,%%xmm3 \n"
+ "movdqa %%xmm3,%%xmm4 \n"
+ "psrlw $0x6,%%xmm4 \n"
+ "pcmpeqb %%xmm7,%%xmm7 \n"
+ "psllw $0x8,%%xmm7 \n"
+ "sub %0,%1 \n"
+ "sub %0,%1 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "psllw $0x1,%%xmm1 \n"
+ "psllw $0xb,%%xmm2 \n"
+ "pand %%xmm3,%%xmm1 \n"
+ "pmulhuw %%xmm5,%%xmm2 \n"
+ "pmulhuw %%xmm5,%%xmm1 \n"
+ "psllw $0x8,%%xmm1 \n"
+ "por %%xmm2,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "pand %%xmm4,%%xmm0 \n"
+ "psraw $0x8,%%xmm2 \n"
+ "pmulhuw %%xmm6,%%xmm0 \n"
+ "pand %%xmm7,%%xmm2 \n"
+ "por %%xmm2,%%xmm0 \n"
+ "movdqa %%xmm1,%%xmm2 \n"
+ "punpcklbw %%xmm0,%%xmm1 \n"
+ "punpckhbw %%xmm0,%%xmm2 \n"
+ MEMOPMEM(movdqu,xmm1,0x00,1,0,2) // movdqu %%xmm1,(%1,%0,2)
+ MEMOPMEM(movdqu,xmm2,0x10,1,0,2) // movdqu %%xmm2,0x10(%1,%0,2)
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(pix) // %2
+ :
+ : "memory", "cc", "eax", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+
+void ARGB4444ToARGBRow_SSE2(const uint8* src, uint8* dst, int pix) {
+ asm volatile (
+ "mov $0xf0f0f0f,%%eax \n"
+ "movd %%eax,%%xmm4 \n"
+ "pshufd $0x0,%%xmm4,%%xmm4 \n"
+ "movdqa %%xmm4,%%xmm5 \n"
+ "pslld $0x4,%%xmm5 \n"
+ "sub %0,%1 \n"
+ "sub %0,%1 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "pand %%xmm4,%%xmm0 \n"
+ "pand %%xmm5,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm2,%%xmm3 \n"
+ "psllw $0x4,%%xmm1 \n"
+ "psrlw $0x4,%%xmm3 \n"
+ "por %%xmm1,%%xmm0 \n"
+ "por %%xmm3,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm2,%%xmm0 \n"
+ "punpckhbw %%xmm2,%%xmm1 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,2) // movdqu %%xmm0,(%1,%0,2)
+ MEMOPMEM(movdqu,xmm1,0x10,1,0,2) // movdqu %%xmm1,0x10(%1,%0,2)
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(pix) // %2
+ :
+ : "memory", "cc", "eax", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+
+void ARGBToRGB24Row_SSSE3(const uint8* src, uint8* dst, int pix) {
+ asm volatile (
+ "movdqa %3,%%xmm6 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "pshufb %%xmm6,%%xmm0 \n"
+ "pshufb %%xmm6,%%xmm1 \n"
+ "pshufb %%xmm6,%%xmm2 \n"
+ "pshufb %%xmm6,%%xmm3 \n"
+ "movdqa %%xmm1,%%xmm4 \n"
+ "psrldq $0x4,%%xmm1 \n"
+ "pslldq $0xc,%%xmm4 \n"
+ "movdqa %%xmm2,%%xmm5 \n"
+ "por %%xmm4,%%xmm0 \n"
+ "pslldq $0x8,%%xmm5 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "por %%xmm5,%%xmm1 \n"
+ "psrldq $0x8,%%xmm2 \n"
+ "pslldq $0x4,%%xmm3 \n"
+ "por %%xmm3,%%xmm2 \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "movdqu %%xmm2," MEMACCESS2(0x20,1) " \n"
+ "lea " MEMLEA(0x30,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(pix) // %2
+ : "m"(kShuffleMaskARGBToRGB24) // %3
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+
+void ARGBToRAWRow_SSSE3(const uint8* src, uint8* dst, int pix) {
+ asm volatile (
+ "movdqa %3,%%xmm6 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "pshufb %%xmm6,%%xmm0 \n"
+ "pshufb %%xmm6,%%xmm1 \n"
+ "pshufb %%xmm6,%%xmm2 \n"
+ "pshufb %%xmm6,%%xmm3 \n"
+ "movdqa %%xmm1,%%xmm4 \n"
+ "psrldq $0x4,%%xmm1 \n"
+ "pslldq $0xc,%%xmm4 \n"
+ "movdqa %%xmm2,%%xmm5 \n"
+ "por %%xmm4,%%xmm0 \n"
+ "pslldq $0x8,%%xmm5 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "por %%xmm5,%%xmm1 \n"
+ "psrldq $0x8,%%xmm2 \n"
+ "pslldq $0x4,%%xmm3 \n"
+ "por %%xmm3,%%xmm2 \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "movdqu %%xmm2," MEMACCESS2(0x20,1) " \n"
+ "lea " MEMLEA(0x30,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(pix) // %2
+ : "m"(kShuffleMaskARGBToRAW) // %3
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+
+void ARGBToRGB565Row_SSE2(const uint8* src, uint8* dst, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm3,%%xmm3 \n"
+ "psrld $0x1b,%%xmm3 \n"
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "psrld $0x1a,%%xmm4 \n"
+ "pslld $0x5,%%xmm4 \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "pslld $0xb,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "pslld $0x8,%%xmm0 \n"
+ "psrld $0x3,%%xmm1 \n"
+ "psrld $0x5,%%xmm2 \n"
+ "psrad $0x10,%%xmm0 \n"
+ "pand %%xmm3,%%xmm1 \n"
+ "pand %%xmm4,%%xmm2 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "por %%xmm2,%%xmm1 \n"
+ "por %%xmm1,%%xmm0 \n"
+ "packssdw %%xmm0,%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(pix) // %2
+ :: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+
+void ARGBToARGB1555Row_SSE2(const uint8* src, uint8* dst, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "psrld $0x1b,%%xmm4 \n"
+ "movdqa %%xmm4,%%xmm5 \n"
+ "pslld $0x5,%%xmm5 \n"
+ "movdqa %%xmm4,%%xmm6 \n"
+ "pslld $0xa,%%xmm6 \n"
+ "pcmpeqb %%xmm7,%%xmm7 \n"
+ "pslld $0xf,%%xmm7 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm3 \n"
+ "psrad $0x10,%%xmm0 \n"
+ "psrld $0x3,%%xmm1 \n"
+ "psrld $0x6,%%xmm2 \n"
+ "psrld $0x9,%%xmm3 \n"
+ "pand %%xmm7,%%xmm0 \n"
+ "pand %%xmm4,%%xmm1 \n"
+ "pand %%xmm5,%%xmm2 \n"
+ "pand %%xmm6,%%xmm3 \n"
+ "por %%xmm1,%%xmm0 \n"
+ "por %%xmm3,%%xmm2 \n"
+ "por %%xmm2,%%xmm0 \n"
+ "packssdw %%xmm0,%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(pix) // %2
+ :: "memory", "cc",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+
+void ARGBToARGB4444Row_SSE2(const uint8* src, uint8* dst, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "psllw $0xc,%%xmm4 \n"
+ "movdqa %%xmm4,%%xmm3 \n"
+ "psrlw $0x8,%%xmm3 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "pand %%xmm3,%%xmm0 \n"
+ "pand %%xmm4,%%xmm1 \n"
+ "psrlq $0x4,%%xmm0 \n"
+ "psrlq $0x8,%%xmm1 \n"
+ "por %%xmm1,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(pix) // %2
+ :: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4"
+ );
+}
+#endif // HAS_RGB24TOARGBROW_SSSE3
+
+#ifdef HAS_ARGBTOYROW_SSSE3
+// Convert 16 ARGB pixels (64 bytes) to 16 Y values.
+void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
+ asm volatile (
+ "movdqa %3,%%xmm4 \n"
+ "movdqa %4,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm4,%%xmm3 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "phaddw %%xmm1,%%xmm0 \n"
+ "phaddw %%xmm3,%%xmm2 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "psrlw $0x7,%%xmm2 \n"
+ "packuswb %%xmm2,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ : "m"(kARGBToY), // %3
+ "m"(kAddY16) // %4
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
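+
+// Scalar equivalent of the kernel above (a sketch; assumes the 7-bit BT.601
+// weights 13/65/33 for B/G/R that kARGBToY carries, matching the visible
+// kRGBAToY table):
+static uint8 ARGBPixelToY(const uint8* argb) {  // argb is B,G,R,A in memory.
+  return (uint8)(((13 * argb[0] + 65 * argb[1] + 33 * argb[2]) >> 7) + 16);
+}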
+#endif // HAS_ARGBTOYROW_SSSE3
+
+#ifdef HAS_ARGBTOYJROW_SSSE3
+// Convert 16 ARGB pixels (64 bytes) to 16 YJ values.
+// Same as ARGBToYRow but with JPEG coefficients, no add 16, and rounding.
+void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
+ asm volatile (
+ "movdqa %3,%%xmm4 \n"
+ "movdqa %4,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm4,%%xmm3 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "phaddw %%xmm1,%%xmm0 \n"
+ "phaddw %%xmm3,%%xmm2 \n"
+ "paddw %%xmm5,%%xmm0 \n"
+ "paddw %%xmm5,%%xmm2 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "psrlw $0x7,%%xmm2 \n"
+ "packuswb %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ : "m"(kARGBToYJ), // %3
+ "m"(kAddYJ64) // %4
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
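+
+// Scalar sketch of the JPEG variant above: the same dot product with the
+// kARGBToYJ weights (wb/wg/wr, summing to 128), a +64 rounding bias, and no
+// +16 offset since JPEG Y is full range (illustrative only):
+static uint8 ARGBPixelToYJ(const uint8* argb, int wb, int wg, int wr) {
+  return (uint8)((wb * argb[0] + wg * argb[1] + wr * argb[2] + 64) >> 7);
+}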
+#endif // HAS_ARGBTOYJROW_SSSE3
+
+#ifdef HAS_ARGBTOYROW_AVX2
+// vpermd table to undo the per-lane mutation of vphaddw + vpackuswb.
+static const lvec32 kPermdARGBToY_AVX = {
+ 0, 4, 1, 5, 2, 6, 3, 7
+};
+
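+// vphaddw and vpackuswb operate within 128-bit lanes, so the 32 Y bytes come
+// out grouped by lane; the permute interleaves dwords 0..3 of the low lane
+// with 4..7 of the high lane to restore linear order. Scalar view of vpermd
+// (illustrative; Unmutate32 is a hypothetical helper):
+static void Unmutate32(const uint32* in, uint32* out) {
+  static const int kPermd[8] = {0, 4, 1, 5, 2, 6, 3, 7};  // same as above.
+  int i;
+  for (i = 0; i < 8; ++i) {
+    out[i] = in[kPermd[i]];  // dst dword i = src dword perm[i].
+  }
+}
+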
+// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
+void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
+ asm volatile (
+ "vbroadcastf128 %3,%%ymm4 \n"
+ "vbroadcastf128 %4,%%ymm5 \n"
+ "vmovdqu %5,%%ymm6 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "vmovdqu " MEMACCESS2(0x40,0) ",%%ymm2 \n"
+ "vmovdqu " MEMACCESS2(0x60,0) ",%%ymm3 \n"
+ "vpmaddubsw %%ymm4,%%ymm0,%%ymm0 \n"
+ "vpmaddubsw %%ymm4,%%ymm1,%%ymm1 \n"
+ "vpmaddubsw %%ymm4,%%ymm2,%%ymm2 \n"
+ "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n"
+ "lea " MEMLEA(0x80,0) ",%0 \n"
+ "vphaddw %%ymm1,%%ymm0,%%ymm0 \n" // mutates.
+ "vphaddw %%ymm3,%%ymm2,%%ymm2 \n"
+ "vpsrlw $0x7,%%ymm0,%%ymm0 \n"
+ "vpsrlw $0x7,%%ymm2,%%ymm2 \n"
+ "vpackuswb %%ymm2,%%ymm0,%%ymm0 \n" // mutates.
+ "vpermd %%ymm0,%%ymm6,%%ymm0 \n" // unmutate.
+ "vpaddb %%ymm5,%%ymm0,%%ymm0 \n" // add 16 for Y
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ : "m"(kARGBToY), // %3
+ "m"(kAddY16), // %4
+ "m"(kPermdARGBToY_AVX) // %5
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+#endif // HAS_ARGBTOYROW_AVX2
+
+#ifdef HAS_ARGBTOYJROW_AVX2
+// Convert 32 ARGB pixels (128 bytes) to 32 Y values (JPEG variant:
+// different coefficients, no add 16, with rounding).
+void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
+ asm volatile (
+ "vbroadcastf128 %3,%%ymm4 \n"
+ "vbroadcastf128 %4,%%ymm5 \n"
+ "vmovdqu %5,%%ymm6 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "vmovdqu " MEMACCESS2(0x40,0) ",%%ymm2 \n"
+ "vmovdqu " MEMACCESS2(0x60,0) ",%%ymm3 \n"
+ "vpmaddubsw %%ymm4,%%ymm0,%%ymm0 \n"
+ "vpmaddubsw %%ymm4,%%ymm1,%%ymm1 \n"
+ "vpmaddubsw %%ymm4,%%ymm2,%%ymm2 \n"
+ "vpmaddubsw %%ymm4,%%ymm3,%%ymm3 \n"
+ "lea " MEMLEA(0x80,0) ",%0 \n"
+ "vphaddw %%ymm1,%%ymm0,%%ymm0 \n" // mutates.
+ "vphaddw %%ymm3,%%ymm2,%%ymm2 \n"
+ "vpaddw %%ymm5,%%ymm0,%%ymm0 \n" // Add .5 for rounding.
+ "vpaddw %%ymm5,%%ymm2,%%ymm2 \n"
+ "vpsrlw $0x7,%%ymm0,%%ymm0 \n"
+ "vpsrlw $0x7,%%ymm2,%%ymm2 \n"
+ "vpackuswb %%ymm2,%%ymm0,%%ymm0 \n" // mutates.
+ "vpermd %%ymm0,%%ymm6,%%ymm0 \n" // unmutate.
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ : "m"(kARGBToYJ), // %3
+ "m"(kAddYJ64), // %4
+ "m"(kPermdARGBToY_AVX) // %5
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+#endif // HAS_ARGBTOYJROW_AVX2
+
+#ifdef HAS_ARGBTOUVROW_SSSE3
+void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ asm volatile (
+ "movdqa %5,%%xmm3 \n"
+ "movdqa %6,%%xmm4 \n"
+ "movdqa %7,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,4,1,xmm7) // movdqu (%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x10,0,4,1,xmm7) // movdqu 0x10(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ MEMOPREG(movdqu,0x20,0,4,1,xmm7) // movdqu 0x20(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x30,0,4,1,xmm7) // movdqu 0x30(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm6 \n"
+
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm7 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqa %%xmm2,%%xmm7 \n"
+ "shufps $0x88,%%xmm6,%%xmm2 \n"
+ "shufps $0xdd,%%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm2,%%xmm6 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm6 \n"
+ "phaddw %%xmm2,%%xmm0 \n"
+ "phaddw %%xmm6,%%xmm1 \n"
+ "psraw $0x8,%%xmm0 \n"
+ "psraw $0x8,%%xmm1 \n"
+ "packsswb %%xmm1,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movlps %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movhps,xmm0,0x00,1,2,1) // movhps %%xmm0,(%1,%2,1)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb0), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "r"((intptr_t)(src_stride_argb)), // %4
+ "m"(kARGBToV), // %5
+ "m"(kARGBToU), // %6
+ "m"(kAddUV128) // %7
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm6", "xmm7"
+ );
+}
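+
+// Scalar sketch of the kernel above (assumes the kARGBToU/kARGBToV weights
+// 112,-74,-38 and -18,-94,112 for B,G,R, matching the visible kRGBAToU and
+// kRGBAToV tables; b, g, r are the 2x2 box averages the code forms with
+// pavgb/shufps; >> 8 is an arithmetic shift, as psraw):
+static void AveragedARGBToUV(int b, int g, int r, uint8* u, uint8* v) {
+  *u = (uint8)(((112 * b - 74 * g - 38 * r) >> 8) + 128);
+  *v = (uint8)(((-18 * b - 94 * g + 112 * r) >> 8) + 128);
+}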
+#endif // HAS_ARGBTOUVROW_SSSE3
+
+#ifdef HAS_ARGBTOUVROW_AVX2
+// vpshufb to reorder the vphaddw + vpackuswb results, packed as shorts.
+static const lvec8 kShufARGBToUV_AVX = {
+ 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
+ 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
+};
+void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ asm volatile (
+ "vbroadcastf128 %5,%%ymm5 \n"
+ "vbroadcastf128 %6,%%ymm6 \n"
+ "vbroadcastf128 %7,%%ymm7 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "vmovdqu " MEMACCESS2(0x40,0) ",%%ymm2 \n"
+ "vmovdqu " MEMACCESS2(0x60,0) ",%%ymm3 \n"
+ VMEMOPREG(vpavgb,0x00,0,4,1,ymm0,ymm0) // vpavgb (%0,%4,1),%%ymm0,%%ymm0
+ VMEMOPREG(vpavgb,0x20,0,4,1,ymm1,ymm1)
+ VMEMOPREG(vpavgb,0x40,0,4,1,ymm2,ymm2)
+ VMEMOPREG(vpavgb,0x60,0,4,1,ymm3,ymm3)
+ "lea " MEMLEA(0x80,0) ",%0 \n"
+ "vshufps $0x88,%%ymm1,%%ymm0,%%ymm4 \n"
+ "vshufps $0xdd,%%ymm1,%%ymm0,%%ymm0 \n"
+ "vpavgb %%ymm4,%%ymm0,%%ymm0 \n"
+ "vshufps $0x88,%%ymm3,%%ymm2,%%ymm4 \n"
+ "vshufps $0xdd,%%ymm3,%%ymm2,%%ymm2 \n"
+ "vpavgb %%ymm4,%%ymm2,%%ymm2 \n"
+
+ "vpmaddubsw %%ymm7,%%ymm0,%%ymm1 \n"
+ "vpmaddubsw %%ymm7,%%ymm2,%%ymm3 \n"
+ "vpmaddubsw %%ymm6,%%ymm0,%%ymm0 \n"
+ "vpmaddubsw %%ymm6,%%ymm2,%%ymm2 \n"
+ "vphaddw %%ymm3,%%ymm1,%%ymm1 \n"
+ "vphaddw %%ymm2,%%ymm0,%%ymm0 \n"
+ "vpsraw $0x8,%%ymm1,%%ymm1 \n"
+ "vpsraw $0x8,%%ymm0,%%ymm0 \n"
+ "vpacksswb %%ymm0,%%ymm1,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpshufb %8,%%ymm0,%%ymm0 \n"
+ "vpaddb %%ymm5,%%ymm0,%%ymm0 \n"
+
+ "vextractf128 $0x0,%%ymm0," MEMACCESS(1) " \n"
+ VEXTOPMEM(vextractf128,1,ymm0,0x0,1,2,1) // vextractf128 $1,%%ymm0,(%1,%2,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x20,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb0), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "r"((intptr_t)(src_stride_argb)), // %4
+ "m"(kAddUV128), // %5
+ "m"(kARGBToV), // %6
+ "m"(kARGBToU), // %7
+ "m"(kShufARGBToUV_AVX) // %8
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBTOUVROW_AVX2
+
+#ifdef HAS_ARGBTOUVJROW_SSSE3
+void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ asm volatile (
+ "movdqa %5,%%xmm3 \n"
+ "movdqa %6,%%xmm4 \n"
+ "movdqa %7,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,4,1,xmm7) // movdqu (%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x10,0,4,1,xmm7) // movdqu 0x10(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ MEMOPREG(movdqu,0x20,0,4,1,xmm7) // movdqu 0x20(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x30,0,4,1,xmm7) // movdqu 0x30(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm6 \n"
+
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm7 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqa %%xmm2,%%xmm7 \n"
+ "shufps $0x88,%%xmm6,%%xmm2 \n"
+ "shufps $0xdd,%%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm2,%%xmm6 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm6 \n"
+ "phaddw %%xmm2,%%xmm0 \n"
+ "phaddw %%xmm6,%%xmm1 \n"
+ "paddw %%xmm5,%%xmm0 \n"
+ "paddw %%xmm5,%%xmm1 \n"
+ "psraw $0x8,%%xmm0 \n"
+ "psraw $0x8,%%xmm1 \n"
+ "packsswb %%xmm1,%%xmm0 \n"
+ "movlps %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movhps,xmm0,0x00,1,2,1) // movhps %%xmm0,(%1,%2,1)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb0), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "r"((intptr_t)(src_stride_argb)), // %4
+ "m"(kARGBToVJ), // %5
+ "m"(kARGBToUJ), // %6
+ "m"(kAddUVJ128) // %7
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBTOUVJROW_SSSE3
+
+#ifdef HAS_ARGBTOUV444ROW_SSSE3
+void ARGBToUV444Row_SSSE3(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int width) {
+ asm volatile (
+ "movdqa %4,%%xmm3 \n"
+ "movdqa %5,%%xmm4 \n"
+ "movdqa %6,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm4,%%xmm6 \n"
+ "phaddw %%xmm1,%%xmm0 \n"
+ "phaddw %%xmm6,%%xmm2 \n"
+ "psraw $0x8,%%xmm0 \n"
+ "psraw $0x8,%%xmm2 \n"
+ "packsswb %%xmm2,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
+ "pmaddubsw %%xmm3,%%xmm0 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm2 \n"
+ "pmaddubsw %%xmm3,%%xmm6 \n"
+ "phaddw %%xmm1,%%xmm0 \n"
+ "phaddw %%xmm6,%%xmm2 \n"
+ "psraw $0x8,%%xmm0 \n"
+ "psraw $0x8,%%xmm2 \n"
+ "packsswb %%xmm2,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,2,1) // movdqu %%xmm0,(%1,%2,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "m"(kARGBToV), // %4
+ "m"(kARGBToU), // %5
+ "m"(kAddUV128) // %6
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm6"
+ );
+}
+#endif // HAS_ARGBTOUV444ROW_SSSE3
+
+#ifdef HAS_ARGBTOUV422ROW_SSSE3
+void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
+ uint8* dst_u, uint8* dst_v, int width) {
+ asm volatile (
+ "movdqa %4,%%xmm3 \n"
+ "movdqa %5,%%xmm4 \n"
+ "movdqa %6,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm7 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqa %%xmm2,%%xmm7 \n"
+ "shufps $0x88,%%xmm6,%%xmm2 \n"
+ "shufps $0xdd,%%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm2,%%xmm6 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm6 \n"
+ "phaddw %%xmm2,%%xmm0 \n"
+ "phaddw %%xmm6,%%xmm1 \n"
+ "psraw $0x8,%%xmm0 \n"
+ "psraw $0x8,%%xmm1 \n"
+ "packsswb %%xmm1,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movlps %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movhps,xmm0,0x00,1,2,1) // movhps %%xmm0,(%1,%2,1)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb0), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "m"(kARGBToV), // %4
+ "m"(kARGBToU), // %5
+ "m"(kAddUV128) // %6
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBTOUV422ROW_SSSE3
+
+void BGRAToYRow_SSSE3(const uint8* src_bgra, uint8* dst_y, int pix) {
+ asm volatile (
+ "movdqa %4,%%xmm5 \n"
+ "movdqa %3,%%xmm4 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm4,%%xmm3 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "phaddw %%xmm1,%%xmm0 \n"
+ "phaddw %%xmm3,%%xmm2 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "psrlw $0x7,%%xmm2 \n"
+ "packuswb %%xmm2,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_bgra), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ : "m"(kBGRAToY), // %3
+ "m"(kAddY16) // %4
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+
+void BGRAToUVRow_SSSE3(const uint8* src_bgra0, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int width) {
+ asm volatile (
+ "movdqa %5,%%xmm3 \n"
+ "movdqa %6,%%xmm4 \n"
+ "movdqa %7,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,4,1,xmm7) // movdqu (%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x10,0,4,1,xmm7) // movdqu 0x10(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ MEMOPREG(movdqu,0x20,0,4,1,xmm7) // movdqu 0x20(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x30,0,4,1,xmm7) // movdqu 0x30(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm6 \n"
+
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm7 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqa %%xmm2,%%xmm7 \n"
+ "shufps $0x88,%%xmm6,%%xmm2 \n"
+ "shufps $0xdd,%%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm2,%%xmm6 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm6 \n"
+ "phaddw %%xmm2,%%xmm0 \n"
+ "phaddw %%xmm6,%%xmm1 \n"
+ "psraw $0x8,%%xmm0 \n"
+ "psraw $0x8,%%xmm1 \n"
+ "packsswb %%xmm1,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movlps %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movhps,xmm0,0x00,1,2,1) // movhps %%xmm0,(%1,%2,1)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_bgra0), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "r"((intptr_t)(src_stride_bgra)), // %4
+ "m"(kBGRAToV), // %5
+ "m"(kBGRAToU), // %6
+ "m"(kAddUV128) // %7
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm6", "xmm7"
+ );
+}
+
+void ABGRToYRow_SSSE3(const uint8* src_abgr, uint8* dst_y, int pix) {
+ asm volatile (
+ "movdqa %4,%%xmm5 \n"
+ "movdqa %3,%%xmm4 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm4,%%xmm3 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "phaddw %%xmm1,%%xmm0 \n"
+ "phaddw %%xmm3,%%xmm2 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "psrlw $0x7,%%xmm2 \n"
+ "packuswb %%xmm2,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_abgr), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ : "m"(kABGRToY), // %3
+ "m"(kAddY16) // %4
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+
+void RGBAToYRow_SSSE3(const uint8* src_rgba, uint8* dst_y, int pix) {
+ asm volatile (
+ "movdqa %4,%%xmm5 \n"
+ "movdqa %3,%%xmm4 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm4,%%xmm3 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "phaddw %%xmm1,%%xmm0 \n"
+ "phaddw %%xmm3,%%xmm2 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "psrlw $0x7,%%xmm2 \n"
+ "packuswb %%xmm2,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_rgba), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ : "m"(kRGBAToY), // %3
+ "m"(kAddY16) // %4
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+
+void ABGRToUVRow_SSSE3(const uint8* src_abgr0, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int width) {
+ asm volatile (
+ "movdqa %5,%%xmm3 \n"
+ "movdqa %6,%%xmm4 \n"
+ "movdqa %7,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,4,1,xmm7) // movdqu (%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x10,0,4,1,xmm7) // movdqu 0x10(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ MEMOPREG(movdqu,0x20,0,4,1,xmm7) // movdqu 0x20(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x30,0,4,1,xmm7) // movdqu 0x30(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm6 \n"
+
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm7 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqa %%xmm2,%%xmm7 \n"
+ "shufps $0x88,%%xmm6,%%xmm2 \n"
+ "shufps $0xdd,%%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm2,%%xmm6 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm6 \n"
+ "phaddw %%xmm2,%%xmm0 \n"
+ "phaddw %%xmm6,%%xmm1 \n"
+ "psraw $0x8,%%xmm0 \n"
+ "psraw $0x8,%%xmm1 \n"
+ "packsswb %%xmm1,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movlps %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movhps,xmm0,0x00,1,2,1) // movhps %%xmm0,(%1,%2,1)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_abgr0), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "r"((intptr_t)(src_stride_abgr)), // %4
+ "m"(kABGRToV), // %5
+ "m"(kABGRToU), // %6
+ "m"(kAddUV128) // %7
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm6", "xmm7"
+ );
+}
+
+void RGBAToUVRow_SSSE3(const uint8* src_rgba0, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int width) {
+ asm volatile (
+ "movdqa %5,%%xmm3 \n"
+ "movdqa %6,%%xmm4 \n"
+ "movdqa %7,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,4,1,xmm7) // movdqu (%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x10,0,4,1,xmm7) // movdqu 0x10(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ MEMOPREG(movdqu,0x20,0,4,1,xmm7) // movdqu 0x20(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x30,0,4,1,xmm7) // movdqu 0x30(%0,%4,1),%%xmm7
+ "pavgb %%xmm7,%%xmm6 \n"
+
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm7 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm0 \n"
+ "movdqa %%xmm2,%%xmm7 \n"
+ "shufps $0x88,%%xmm6,%%xmm2 \n"
+ "shufps $0xdd,%%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm2,%%xmm6 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm2 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm6 \n"
+ "phaddw %%xmm2,%%xmm0 \n"
+ "phaddw %%xmm6,%%xmm1 \n"
+ "psraw $0x8,%%xmm0 \n"
+ "psraw $0x8,%%xmm1 \n"
+ "packsswb %%xmm1,%%xmm0 \n"
+ "paddb %%xmm5,%%xmm0 \n"
+ "movlps %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movhps,xmm0,0x00,1,2,1) // movhps %%xmm0,(%1,%2,1)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_rgba0), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+rm"(width) // %3
+ : "r"((intptr_t)(src_stride_rgba)), // %4
+ "m"(kRGBAToV), // %5
+ "m"(kRGBAToU), // %6
+ "m"(kAddUV128) // %7
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm6", "xmm7"
+ );
+}
+
+#if defined(HAS_I422TOARGBROW_SSSE3) || defined(HAS_I422TOARGBROW_AVX2)
+
+struct YuvConstants {
+ lvec8 kUVToB; // 0
+ lvec8 kUVToG; // 32
+ lvec8 kUVToR; // 64
+ lvec16 kUVBiasB; // 96
+ lvec16 kUVBiasG; // 128
+ lvec16 kUVBiasR; // 160
+ lvec16 kYToRgb; // 192
+};
+
+// BT.601 YUV to RGB reference
+// R = (Y - 16) * 1.164 - V * -1.596
+// G = (Y - 16) * 1.164 - U * 0.391 - V * 0.813
+// B = (Y - 16) * 1.164 - U * -2.018
+
+// Y contribution to R,G,B. Scale and bias.
+// TODO(fbarchard): Consider moving constants into a common header.
+#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */
+#define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */
+
+// U and V contributions to R,G,B.
+#define UB -128 /* max(-128, round(-2.018 * 64)) */
+#define UG 25 /* round(0.391 * 64) */
+#define VG 52 /* round(0.813 * 64) */
+#define VR -102 /* round(-1.596 * 64) */
+
+// Bias values to subtract 16 from Y and 128 from U and V.
+#define BB (UB * 128 + YGB)
+#define BG (UG * 128 + VG * 128 + YGB)
+#define BR (VR * 128 + YGB)
+
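+// Folding the offsets: per channel the kernel computes, in 6-bit fixed
+// point, e.g. B = clamp((y_scaled + BB - UB * U) >> 6) where
+// y_scaled = (Y * 0x0101 * YG) >> 16. Substituting BB = UB * 128 + YGB gives
+// y_scaled + YGB - UB * (U - 128), i.e. the 128 chroma offset is pre-folded
+// into the bias vectors. Numerically, BB = -128 * 128 + -1160 = -17544.
+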
+// BT601 constants for YUV to RGB.
+static YuvConstants SIMD_ALIGNED(kYuvConstants) = {
+ { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0,
+ UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 },
+ { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
+ UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG },
+ { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR,
+ 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR },
+ { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB },
+ { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG },
+ { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR },
+ { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG }
+};
+
+// BT601 constants for NV21 where chroma plane is VU instead of UV.
+static YuvConstants SIMD_ALIGNED(kYvuConstants) = {
+ { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB,
+ 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB },
+ { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
+ VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG },
+ { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0,
+ VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 },
+ { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB },
+ { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG },
+ { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR },
+ { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG }
+};
+
+#undef YG
+#undef YGB
+#undef UB
+#undef UG
+#undef VG
+#undef VR
+#undef BB
+#undef BG
+#undef BR
+
+// JPEG YUV to RGB reference
+// R = Y - V * -1.40200
+// G = Y - U * 0.34414 - V * 0.71414
+// B = Y - U * -1.77200
+
+// Y contribution to R,G,B. Scale and bias.
+// TODO(fbarchard): Consider moving constants into a common header.
+#define YGJ 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
+#define YGBJ 32 /* 64 / 2 */
+
+// U and V contributions to R,G,B.
+#define UBJ -113 /* round(-1.77200 * 64) */
+#define UGJ 22 /* round(0.34414 * 64) */
+#define VGJ 46 /* round(0.71414 * 64) */
+#define VRJ -90 /* round(-1.40200 * 64) */
+
+// Bias values: rounding for Y (no 16 offset, since JPEG Y is full range)
+// and subtract 128 from U and V.
+#define BBJ (UBJ * 128 + YGBJ)
+#define BGJ (UGJ * 128 + VGJ * 128 + YGBJ)
+#define BRJ (VRJ * 128 + YGBJ)
+
+// JPEG constants for YUV to RGB.
+YuvConstants SIMD_ALIGNED(kYuvJConstants) = {
+ { UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0,
+ UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0 },
+ { UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
+ UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
+ UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
+ UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ },
+ { 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ,
+ 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ },
+ { BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ,
+ BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ },
+ { BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ,
+ BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ },
+ { BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ,
+ BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ },
+ { YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ,
+ YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ }
+};
+
+#undef YGJ
+#undef YGBJ
+#undef UBJ
+#undef UGJ
+#undef VGJ
+#undef VRJ
+#undef BBJ
+#undef BGJ
+#undef BRJ
+
+// Read 8 UV from 444
+#define READYUV444 \
+ "movq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
+ MEMOPREG(movq, 0x00, [u_buf], [v_buf], 1, xmm1) \
+ "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \
+ "punpcklbw %%xmm1,%%xmm0 \n"
+
+// Read 4 UV from 422, upsample to 8 UV
+#define READYUV422 \
+ "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
+ MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) \
+ "lea " MEMLEA(0x4, [u_buf]) ",%[u_buf] \n" \
+ "punpcklbw %%xmm1,%%xmm0 \n" \
+ "punpcklwd %%xmm0,%%xmm0 \n"
+
+// Read 2 UV from 411, upsample to 8 UV
+#define READYUV411 \
+ "movd " MEMACCESS([u_buf]) ",%%xmm0 \n" \
+ MEMOPREG(movd, 0x00, [u_buf], [v_buf], 1, xmm1) \
+ "lea " MEMLEA(0x2, [u_buf]) ",%[u_buf] \n" \
+ "punpcklbw %%xmm1,%%xmm0 \n" \
+ "punpcklwd %%xmm0,%%xmm0 \n" \
+ "punpckldq %%xmm0,%%xmm0 \n"
+
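+// The three readers above differ only in replication: 444 keeps 8 distinct
+// UV pairs, 422 doubles 4 pairs, and 411 quadruples 2. A scalar sketch of
+// the resulting interleave (illustrative; UpsampleUV is a hypothetical
+// helper, with rep = 1, 2 or 4):
+static void UpsampleUV(const uint8* u, const uint8* v, int rep, uint8* uv) {
+  int i, j;
+  for (i = 0; i < 8 / rep; ++i) {
+    for (j = 0; j < rep; ++j) {
+      uv[2 * (i * rep + j) + 0] = u[i];
+      uv[2 * (i * rep + j) + 1] = v[i];
+    }
+  }
+}
+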
+// Read 4 UV from NV12, upsample to 8 UV
+#define READNV12 \
+ "movq " MEMACCESS([uv_buf]) ",%%xmm0 \n" \
+ "lea " MEMLEA(0x8, [uv_buf]) ",%[uv_buf] \n" \
+ "punpcklwd %%xmm0,%%xmm0 \n"
+
+// Convert 8 pixels: 8 UV and 8 Y
+#define YUVTORGB(YuvConstants) \
+ "movdqa %%xmm0,%%xmm1 \n" \
+ "movdqa %%xmm0,%%xmm2 \n" \
+ "movdqa %%xmm0,%%xmm3 \n" \
+ "movdqa " MEMACCESS2(96, [YuvConstants]) ",%%xmm0 \n" \
+ "pmaddubsw " MEMACCESS([YuvConstants]) ",%%xmm1 \n" \
+ "psubw %%xmm1,%%xmm0 \n" \
+ "movdqa " MEMACCESS2(128, [YuvConstants]) ",%%xmm1 \n" \
+ "pmaddubsw " MEMACCESS2(32, [YuvConstants]) ",%%xmm2 \n" \
+ "psubw %%xmm2,%%xmm1 \n" \
+ "movdqa " MEMACCESS2(160, [YuvConstants]) ",%%xmm2 \n" \
+ "pmaddubsw " MEMACCESS2(64, [YuvConstants]) ",%%xmm3 \n" \
+ "psubw %%xmm3,%%xmm2 \n" \
+ "movq " MEMACCESS([y_buf]) ",%%xmm3 \n" \
+ "lea " MEMLEA(0x8, [y_buf]) ",%[y_buf] \n" \
+ "punpcklbw %%xmm3,%%xmm3 \n" \
+ "pmulhuw " MEMACCESS2(192, [YuvConstants]) ",%%xmm3 \n" \
+ "paddsw %%xmm3,%%xmm0 \n" \
+ "paddsw %%xmm3,%%xmm1 \n" \
+ "paddsw %%xmm3,%%xmm2 \n" \
+ "psraw $0x6,%%xmm0 \n" \
+ "psraw $0x6,%%xmm1 \n" \
+ "psraw $0x6,%%xmm2 \n" \
+ "packuswb %%xmm0,%%xmm0 \n" \
+ "packuswb %%xmm1,%%xmm1 \n" \
+ "packuswb %%xmm2,%%xmm2 \n"
+
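+// Per-pixel math implemented by YUVTORGB, with the SIMD saturating add and
+// pack reduced to a final clamp (a sketch; for B, for example, uv_term is
+// UB * U and bias is BB from the constants above):
+static uint8 YuvChannel(int y, int uv_term, int bias, int yg) {
+  int v = ((((y * 0x0101) * yg) >> 16) + bias - uv_term) >> 6;
+  return (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
+}
+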
+// Store 8 ARGB values. Assumes XMM5 is zero.
+#define STOREARGB \
+ "punpcklbw %%xmm1,%%xmm0 \n" \
+ "punpcklbw %%xmm5,%%xmm2 \n" \
+ "movdqa %%xmm0,%%xmm1 \n" \
+ "punpcklwd %%xmm2,%%xmm0 \n" \
+ "punpckhwd %%xmm2,%%xmm1 \n" \
+ "movdqu %%xmm0," MEMACCESS([dst_argb]) " \n" \
+ "movdqu %%xmm1," MEMACCESS2(0x10, [dst_argb]) " \n" \
+ "lea " MEMLEA(0x20, [dst_argb]) ", %[dst_argb] \n"
+
+// Store 8 BGRA values. Assumes XMM5 is zero.
+#define STOREBGRA \
+ "pcmpeqb %%xmm5,%%xmm5 \n" \
+ "punpcklbw %%xmm0,%%xmm1 \n" \
+ "punpcklbw %%xmm2,%%xmm5 \n" \
+ "movdqa %%xmm5,%%xmm0 \n" \
+ "punpcklwd %%xmm1,%%xmm5 \n" \
+ "punpckhwd %%xmm1,%%xmm0 \n" \
+ "movdqu %%xmm5," MEMACCESS([dst_bgra]) " \n" \
+ "movdqu %%xmm0," MEMACCESS2(0x10, [dst_bgra]) " \n" \
+ "lea " MEMLEA(0x20, [dst_bgra]) ", %[dst_bgra] \n"
+
+// Store 8 ABGR values. Assumes XMM5 is zero.
+#define STOREABGR \
+ "punpcklbw %%xmm1,%%xmm2 \n" \
+ "punpcklbw %%xmm5,%%xmm0 \n" \
+ "movdqa %%xmm2,%%xmm1 \n" \
+ "punpcklwd %%xmm0,%%xmm2 \n" \
+ "punpckhwd %%xmm0,%%xmm1 \n" \
+ "movdqu %%xmm2," MEMACCESS([dst_abgr]) " \n" \
+ "movdqu %%xmm1," MEMACCESS2(0x10, [dst_abgr]) " \n" \
+ "lea " MEMLEA(0x20, [dst_abgr]) ", %[dst_abgr] \n"
+
+// Store 8 RGBA values. Assumes XMM5 is zero.
+#define STORERGBA \
+ "pcmpeqb %%xmm5,%%xmm5 \n" \
+ "punpcklbw %%xmm2,%%xmm1 \n" \
+ "punpcklbw %%xmm0,%%xmm5 \n" \
+ "movdqa %%xmm5,%%xmm0 \n" \
+ "punpcklwd %%xmm1,%%xmm5 \n" \
+ "punpckhwd %%xmm1,%%xmm0 \n" \
+ "movdqu %%xmm5," MEMACCESS([dst_rgba]) " \n" \
+ "movdqu %%xmm0," MEMACCESS2(0x10, [dst_rgba]) " \n" \
+ "lea " MEMLEA(0x20, [dst_rgba]) ",%[dst_rgba] \n"
+
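+// The STORE* macros differ only in byte interleave order. Per pixel,
+// STOREARGB lays down B,G,R,A (scalar view, illustrative only):
+static void StoreARGBPixel(uint8 b, uint8 g, uint8 r, uint8 a, uint8* dst) {
+  dst[0] = b;  // ARGB little-endian memory order is B,G,R,A.
+  dst[1] = g;
+  dst[2] = r;
+  dst[3] = a;
+}
+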
+void OMITFP I444ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV444
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+// TODO(fbarchard): Consider putting masks into constants.
+void OMITFP I422ToRGB24Row_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_rgb24,
+ int width) {
+ asm volatile (
+ "movdqa %[kShuffleMaskARGBToRGB24_0],%%xmm5 \n"
+ "movdqa %[kShuffleMaskARGBToRGB24],%%xmm6 \n"
+ "sub %[u_buf],%[v_buf] \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ "punpcklbw %%xmm1,%%xmm0 \n"
+ "punpcklbw %%xmm2,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklwd %%xmm2,%%xmm0 \n"
+ "punpckhwd %%xmm2,%%xmm1 \n"
+ "pshufb %%xmm5,%%xmm0 \n"
+ "pshufb %%xmm6,%%xmm1 \n"
+ "palignr $0xc,%%xmm0,%%xmm1 \n"
+ "movq %%xmm0," MEMACCESS([dst_rgb24]) "\n"
+ "movdqu %%xmm1," MEMACCESS2(0x8,[dst_rgb24]) "\n"
+ "lea " MEMLEA(0x18,[dst_rgb24]) ",%[dst_rgb24] \n"
+ "subl $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_rgb24]"+r"(dst_rgb24), // %[dst_rgb24]
+// TODO(fbarchard): Make width a register for 32 bit.
+#if defined(__i386__) && defined(__pic__)
+ [width]"+m"(width) // %[width]
+#else
+ [width]"+rm"(width) // %[width]
+#endif
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB),
+ [kShuffleMaskARGBToRGB24_0]"m"(kShuffleMaskARGBToRGB24_0),
+ [kShuffleMaskARGBToRGB24]"m"(kShuffleMaskARGBToRGB24)
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5", "xmm6"
+ );
+}
+
+void OMITFP I422ToRAWRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_raw,
+ int width) {
+ asm volatile (
+ "movdqa %[kShuffleMaskARGBToRAW_0],%%xmm5 \n"
+ "movdqa %[kShuffleMaskARGBToRAW],%%xmm6 \n"
+ "sub %[u_buf],%[v_buf] \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ "punpcklbw %%xmm1,%%xmm0 \n"
+ "punpcklbw %%xmm2,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklwd %%xmm2,%%xmm0 \n"
+ "punpckhwd %%xmm2,%%xmm1 \n"
+ "pshufb %%xmm5,%%xmm0 \n"
+ "pshufb %%xmm6,%%xmm1 \n"
+ "palignr $0xc,%%xmm0,%%xmm1 \n"
+ "movq %%xmm0," MEMACCESS([dst_raw]) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x8,[dst_raw]) "\n"
+ "lea " MEMLEA(0x18,[dst_raw]) ",%[dst_raw] \n"
+ "subl $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_raw]"+r"(dst_raw), // %[dst_raw]
+// TODO(fbarchard): Make width a register for 32 bit.
+#if defined(__i386__) && defined(__pic__)
+ [width]"+m"(width) // %[width]
+#else
+ [width]"+rm"(width) // %[width]
+#endif
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB),
+ [kShuffleMaskARGBToRAW_0]"m"(kShuffleMaskARGBToRAW_0),
+ [kShuffleMaskARGBToRAW]"m"(kShuffleMaskARGBToRAW)
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5", "xmm6"
+ );
+}
+
+void OMITFP I422ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void OMITFP J422ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvJConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void OMITFP I411ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV411
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void OMITFP NV12ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READNV12
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [uv_buf]"+r"(uv_buf), // %[uv_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ // Does not use r14.
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void OMITFP NV21ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READNV12
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [uv_buf]"+r"(uv_buf), // %[uv_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYvuConstants.kUVToB) // %[kYuvConstants]
+ // Does not use r14.
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void OMITFP I422ToBGRARow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_bgra,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STOREBGRA
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_bgra]"+r"(dst_bgra), // %[dst_bgra]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void OMITFP I422ToABGRRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_abgr,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STOREABGR
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_abgr]"+r"(dst_abgr), // %[dst_abgr]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void OMITFP I422ToRGBARow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_rgba,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STORERGBA
+ "sub $0x8,%[width] \n"
+ "jg 1b \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_rgba]"+r"(dst_rgba), // %[dst_rgba]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+#endif // HAS_I422TOARGBROW_SSSE3
+
+// Read 8 UV from 422, upsample to 16 UV.
+#define READYUV422_AVX2 \
+ "vmovq " MEMACCESS([u_buf]) ",%%xmm0 \n" \
+ MEMOPREG(vmovq, 0x00, [u_buf], [v_buf], 1, xmm1) \
+ "lea " MEMLEA(0x8, [u_buf]) ",%[u_buf] \n" \
+ "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" \
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n" \
+ "vpunpcklwd %%ymm0,%%ymm0,%%ymm0 \n"
+
+// Convert 16 pixels: 16 UV and 16 Y.
+#define YUVTORGB_AVX2(YuvConstants) \
+ "vpmaddubsw " MEMACCESS2(64, [YuvConstants]) ",%%ymm0,%%ymm2 \n" \
+ "vpmaddubsw " MEMACCESS2(32, [YuvConstants]) ",%%ymm0,%%ymm1 \n" \
+ "vpmaddubsw " MEMACCESS([YuvConstants]) ",%%ymm0,%%ymm0 \n" \
+ "vmovdqu " MEMACCESS2(160, [YuvConstants]) ",%%ymm3 \n" \
+ "vpsubw %%ymm2,%%ymm3,%%ymm2 \n" \
+ "vmovdqu " MEMACCESS2(128, [YuvConstants]) ",%%ymm3 \n" \
+ "vpsubw %%ymm1,%%ymm3,%%ymm1 \n" \
+ "vmovdqu " MEMACCESS2(96, [YuvConstants]) ",%%ymm3 \n" \
+ "vpsubw %%ymm0,%%ymm3,%%ymm0 \n" \
+ "vmovdqu " MEMACCESS([y_buf]) ",%%xmm3 \n" \
+ "lea " MEMLEA(0x10, [y_buf]) ",%[y_buf] \n" \
+ "vpermq $0xd8,%%ymm3,%%ymm3 \n" \
+ "vpunpcklbw %%ymm3,%%ymm3,%%ymm3 \n" \
+ "vpmulhuw " MEMACCESS2(192, [YuvConstants]) ",%%ymm3,%%ymm3 \n" \
+ "vpaddsw %%ymm3,%%ymm0,%%ymm0 \n" \
+ "vpaddsw %%ymm3,%%ymm1,%%ymm1 \n" \
+ "vpaddsw %%ymm3,%%ymm2,%%ymm2 \n" \
+ "vpsraw $0x6,%%ymm0,%%ymm0 \n" \
+ "vpsraw $0x6,%%ymm1,%%ymm1 \n" \
+ "vpsraw $0x6,%%ymm2,%%ymm2 \n" \
+ "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n" \
+ "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n" \
+ "vpackuswb %%ymm2,%%ymm2,%%ymm2 \n"
+
+#if defined(HAS_I422TOBGRAROW_AVX2)
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes).
+void OMITFP I422ToBGRARow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_bgra,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+
+ // Step 3: Weave into BGRA
+ "vpunpcklbw %%ymm0,%%ymm1,%%ymm1 \n" // GB
+ "vpermq $0xd8,%%ymm1,%%ymm1 \n"
+ "vpunpcklbw %%ymm2,%%ymm5,%%ymm2 \n" // AR
+ "vpermq $0xd8,%%ymm2,%%ymm2 \n"
+ "vpunpcklwd %%ymm1,%%ymm2,%%ymm0 \n" // ARGB first 8 pixels
+ "vpunpckhwd %%ymm1,%%ymm2,%%ymm2 \n" // ARGB next 8 pixels
+
+ "vmovdqu %%ymm0," MEMACCESS([dst_bgra]) "\n"
+ "vmovdqu %%ymm2," MEMACCESS2(0x20,[dst_bgra]) "\n"
+ "lea " MEMLEA(0x40,[dst_bgra]) ",%[dst_bgra] \n"
+ "sub $0x10,%[width] \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_bgra]"+r"(dst_bgra), // %[dst_bgra]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_I422TOBGRAROW_AVX2
+
+#if defined(HAS_I422TOARGBROW_AVX2)
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
+void OMITFP I422ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+
+ // Step 3: Weave into ARGB
+ "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" // BG
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpunpcklbw %%ymm5,%%ymm2,%%ymm2 \n" // RA
+ "vpermq $0xd8,%%ymm2,%%ymm2 \n"
+ "vpunpcklwd %%ymm2,%%ymm0,%%ymm1 \n" // BGRA first 8 pixels
+ "vpunpckhwd %%ymm2,%%ymm0,%%ymm0 \n" // BGRA next 8 pixels
+
+ "vmovdqu %%ymm1," MEMACCESS([dst_argb]) "\n"
+ "vmovdqu %%ymm0," MEMACCESS2(0x20,[dst_argb]) "\n"
+ "lea " MEMLEA(0x40,[dst_argb]) ",%[dst_argb] \n"
+ "sub $0x10,%[width] \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_I422TOARGBROW_AVX2
+
+#if defined(HAS_J422TOARGBROW_AVX2)
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
+void OMITFP J422ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+
+ // Step 3: Weave into ARGB
+ "vpunpcklbw %%ymm1,%%ymm0,%%ymm0 \n" // BG
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpunpcklbw %%ymm5,%%ymm2,%%ymm2 \n" // RA
+ "vpermq $0xd8,%%ymm2,%%ymm2 \n"
+ "vpunpcklwd %%ymm2,%%ymm0,%%ymm1 \n" // BGRA first 8 pixels
+ "vpunpckhwd %%ymm2,%%ymm0,%%ymm0 \n" // BGRA next 8 pixels
+
+ "vmovdqu %%ymm1," MEMACCESS([dst_argb]) "\n"
+ "vmovdqu %%ymm0," MEMACCESS2(0x20,[dst_argb]) "\n"
+ "lea " MEMLEA(0x40,[dst_argb]) ",%[dst_argb] \n"
+ "sub $0x10,%[width] \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvJConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_J422TOARGBROW_AVX2
+
+#if defined(HAS_I422TOABGRROW_AVX2)
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ABGR (64 bytes).
+void OMITFP I422ToABGRRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+
+ // Step 3: Weave into ABGR
+ "vpunpcklbw %%ymm1,%%ymm2,%%ymm1 \n" // RG
+ "vpermq $0xd8,%%ymm1,%%ymm1 \n"
+ "vpunpcklbw %%ymm5,%%ymm0,%%ymm2 \n" // BA
+ "vpermq $0xd8,%%ymm2,%%ymm2 \n"
+ "vpunpcklwd %%ymm2,%%ymm1,%%ymm0 \n" // RGBA first 8 pixels
+ "vpunpckhwd %%ymm2,%%ymm1,%%ymm1 \n" // RGBA next 8 pixels
+ "vmovdqu %%ymm0," MEMACCESS([dst_argb]) "\n"
+ "vmovdqu %%ymm1," MEMACCESS2(0x20,[dst_argb]) "\n"
+ "lea " MEMLEA(0x40,[dst_argb]) ",%[dst_argb] \n"
+ "sub $0x10,%[width] \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_I422TOABGRROW_AVX2
+
+#if defined(HAS_I422TORGBAROW_AVX2)
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 RGBA (64 bytes).
+void OMITFP I422ToRGBARow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "sub %[u_buf],%[v_buf] \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+
+ // Step 3: Weave into RGBA
+ "vpunpcklbw %%ymm2,%%ymm1,%%ymm1 \n" // GR
+ "vpermq $0xd8,%%ymm1,%%ymm1 \n"
+ "vpunpcklbw %%ymm0,%%ymm5,%%ymm2 \n" // AB
+ "vpermq $0xd8,%%ymm2,%%ymm2 \n"
+ "vpunpcklwd %%ymm1,%%ymm2,%%ymm0 \n" // ABGR first 8 pixels
+ "vpunpckhwd %%ymm1,%%ymm2,%%ymm1 \n" // ABGR next 8 pixels
+ "vmovdqu %%ymm0," MEMACCESS([dst_argb]) "\n"
+ "vmovdqu %%ymm1," MEMACCESS2(0x20,[dst_argb]) "\n"
+ "lea " MEMLEA(0x40,[dst_argb]) ",%[dst_argb] \n"
+ "sub $0x10,%[width] \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : [y_buf]"+r"(y_buf), // %[y_buf]
+ [u_buf]"+r"(u_buf), // %[u_buf]
+ [v_buf]"+r"(v_buf), // %[v_buf]
+ [dst_argb]"+r"(dst_argb), // %[dst_argb]
+ [width]"+rm"(width) // %[width]
+ : [kYuvConstants]"r"(&kYuvConstants.kUVToB) // %[kYuvConstants]
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_I422TORGBAROW_AVX2
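+
+// Note on the weave pattern shared by the I422/J422 kernels above: AVX2
+// vpunpck* instructions interleave within each 128-bit lane rather than
+// across the full register, so each unpack is paired with vpermq $0xd8
+// (qword order 0,2,1,3) to restore sequential pixel order before the two
+// 32-byte stores.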
+
+#ifdef HAS_I400TOARGBROW_SSE2
+void I400ToARGBRow_SSE2(const uint8* y_buf, uint8* dst_argb, int width) {
+ asm volatile (
+ "mov $0x4a354a35,%%eax \n" // 4a35 = 18997 = 1.164
+ "movd %%eax,%%xmm2 \n"
+ "pshufd $0x0,%%xmm2,%%xmm2 \n"
+ "mov $0x04880488,%%eax \n" // 0488 = 1160 = 1.164 * 16
+ "movd %%eax,%%xmm3 \n"
+ "pshufd $0x0,%%xmm3,%%xmm3 \n"
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "pslld $0x18,%%xmm4 \n"
+ LABELALIGN
+ "1: \n"
+ // Step 1: Scale Y contribution to 8 G values. G = (y - 16) * 1.164
+ "movq " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ "pmulhuw %%xmm2,%%xmm0 \n"
+ "psubusw %%xmm3,%%xmm0 \n"
+ "psrlw $6, %%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+
+ // Step 2: Weave into ARGB
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklwd %%xmm0,%%xmm0 \n"
+ "punpckhwd %%xmm1,%%xmm1 \n"
+ "por %%xmm4,%%xmm0 \n"
+ "por %%xmm4,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(y_buf), // %0
+ "+r"(dst_argb), // %1
+ "+rm"(width) // %2
+ :
+ : "memory", "cc", "eax"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4"
+ );
+}
+#endif // HAS_I400TOARGBROW_SSE2
+
+#ifdef HAS_I400TOARGBROW_AVX2
+// 16 pixels of Y converted to 16 pixels of ARGB (64 bytes).
+// note: vpunpcklbw mutates and vpackuswb unmutates.
+void I400ToARGBRow_AVX2(const uint8* y_buf, uint8* dst_argb, int width) {
+ asm volatile (
+ "mov $0x4a354a35,%%eax \n" // 4a35 = 18997 = 1.164
+ "vmovd %%eax,%%xmm2 \n"
+ "vbroadcastss %%xmm2,%%ymm2 \n"
+ "mov $0x4880488,%%eax \n" // 0488 = 1160 = 1.164 * 16
+ "vmovd %%eax,%%xmm3 \n"
+ "vbroadcastss %%xmm3,%%ymm3 \n"
+ "vpcmpeqb %%ymm4,%%ymm4,%%ymm4 \n"
+ "vpslld $0x18,%%ymm4,%%ymm4 \n"
+
+ LABELALIGN
+ "1: \n"
+ // Step 1: Scale Y contribution to 16 G values. G = (y - 16) * 1.164
+ "vmovdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpunpcklbw %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpmulhuw %%ymm2,%%ymm0,%%ymm0 \n"
+ "vpsubusw %%ymm3,%%ymm0,%%ymm0 \n"
+ "vpsrlw $0x6,%%ymm0,%%ymm0 \n"
+ "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpunpcklbw %%ymm0,%%ymm0,%%ymm1 \n"
+ "vpermq $0xd8,%%ymm1,%%ymm1 \n"
+ "vpunpcklwd %%ymm1,%%ymm1,%%ymm0 \n"
+ "vpunpckhwd %%ymm1,%%ymm1,%%ymm1 \n"
+ "vpor %%ymm4,%%ymm0,%%ymm0 \n"
+ "vpor %%ymm4,%%ymm1,%%ymm1 \n"
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "vmovdqu %%ymm1," MEMACCESS2(0x20,1) " \n"
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(y_buf), // %0
+ "+r"(dst_argb), // %1
+ "+rm"(width) // %2
+ :
+ : "memory", "cc", "eax"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4"
+ );
+}
+#endif // HAS_I400TOARGBROW_AVX2
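+
+// A scalar sketch (illustrative helper, not part of this file's API) of
+// the per-pixel math both I400ToARGB kernels above perform: the Y byte is
+// widened to y*0x0101, pmulhuw keeps the high 16 bits of the product with
+// 0x4a35, and the bias and shift complete G = (y - 16) * 1.164.
+static uint8 I400ToGray_Scalar(uint8 y) {
+  uint32 g = ((uint32)(y) * 0x0101 * 0x4a35) >> 16; // pmulhuw
+  g = (g > 0x0488) ? (g - 0x0488) : 0; // psubusw saturates at zero
+  g >>= 6; // psrlw $0x6
+  return (uint8)(g > 255 ? 255 : g); // packuswb saturates
+}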
+
+#ifdef HAS_MIRRORROW_SSSE3
+// Shuffle table for reversing the bytes.
+static uvec8 kShuffleMirror = {
+ 15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
+};
+
+void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
+ intptr_t temp_width = (intptr_t)(width);
+ asm volatile (
+ "movdqa %3,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ MEMOPREG(movdqu,-0x10,0,2,1,xmm0) // movdqu -0x10(%0,%2),%%xmm0
+ "pshufb %%xmm5,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(temp_width) // %2
+ : "m"(kShuffleMirror) // %3
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm5"
+ );
+}
+#endif // HAS_MIRRORROW_SSSE3
+
+#ifdef HAS_MIRRORROW_AVX2
+void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
+ intptr_t temp_width = (intptr_t)(width);
+ asm volatile (
+ "vbroadcastf128 %3,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ MEMOPREG(vmovdqu,-0x20,0,2,1,ymm0) // vmovdqu -0x20(%0,%2),%%ymm0
+ "vpshufb %%ymm5,%%ymm0,%%ymm0 \n"
+ "vpermq $0x4e,%%ymm0,%%ymm0 \n"
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(temp_width) // %2
+ : "m"(kShuffleMirror) // %3
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm5"
+ );
+}
+#endif // HAS_MIRRORROW_AVX2
+
+#ifdef HAS_MIRRORROW_SSE2
+void MirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
+ intptr_t temp_width = (intptr_t)(width);
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ MEMOPREG(movdqu,-0x10,0,2,1,xmm0) // movdqu -0x10(%0,%2),%%xmm0
+ "movdqa %%xmm0,%%xmm1 \n"
+ "psllw $0x8,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "por %%xmm1,%%xmm0 \n"
+ "pshuflw $0x1b,%%xmm0,%%xmm0 \n"
+ "pshufhw $0x1b,%%xmm0,%%xmm0 \n"
+ "pshufd $0x4e,%%xmm0,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1)",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(temp_width) // %2
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1"
+ );
+}
+#endif // HAS_MIRRORROW_SSE2
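+
+// Scalar equivalent (illustrative helper, not in this file) of the three
+// mirror kernels above: the loads step backward from the end of the row
+// and the shuffle reverses the bytes within each register.
+static void MirrorRow_Scalar(const uint8* src, uint8* dst, int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst[x] = src[width - 1 - x];
+  }
+}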
+
+#ifdef HAS_MIRRORROW_UV_SSSE3
+// Shuffle table for reversing the bytes of UV channels.
+static uvec8 kShuffleMirrorUV = {
+ 14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
+};
+void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
+ int width) {
+ intptr_t temp_width = (intptr_t)(width);
+ asm volatile (
+ "movdqa %4,%%xmm1 \n"
+ "lea " MEMLEA4(-0x10,0,3,2) ",%0 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(-0x10,0) ",%0 \n"
+ "pshufb %%xmm1,%%xmm0 \n"
+ "movlpd %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movhpd,xmm0,0x00,1,2,1) // movhpd %%xmm0,(%1,%2)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $8,%3 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(temp_width) // %3
+ : "m"(kShuffleMirrorUV) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1"
+ );
+}
+#endif // HAS_MIRRORROW_UV_SSSE3
+
+#ifdef HAS_ARGBMIRRORROW_SSE2
+
+void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
+ intptr_t temp_width = (intptr_t)(width);
+ asm volatile (
+ "lea " MEMLEA4(-0x10,0,2,4) ",%0 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "pshufd $0x1b,%%xmm0,%%xmm0 \n"
+ "lea " MEMLEA(-0x10,0) ",%0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(temp_width) // %2
+ :
+ : "memory", "cc"
+ , "xmm0"
+ );
+}
+#endif // HAS_ARGBMIRRORROW_SSE2
+
+#ifdef HAS_ARGBMIRRORROW_AVX2
+// Shuffle table for reversing the bytes.
+static const ulvec32 kARGBShuffleMirror_AVX2 = {
+ 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
+};
+void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
+ intptr_t temp_width = (intptr_t)(width);
+ asm volatile (
+ "vmovdqu %3,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ VMEMOPREG(vpermd,-0x20,0,2,4,ymm5,ymm0) // vpermd -0x20(%0,%2,4),ymm5,ymm0
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(temp_width) // %2
+ : "m"(kARGBShuffleMirror_AVX2) // %3
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm5"
+ );
+}
+#endif // HAS_ARGBMIRRORROW_AVX2
+
+#ifdef HAS_SPLITUVROW_AVX2
+void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm2 \n"
+ "vpsrlw $0x8,%%ymm1,%%ymm3 \n"
+ "vpand %%ymm5,%%ymm0,%%ymm0 \n"
+ "vpand %%ymm5,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpackuswb %%ymm3,%%ymm2,%%ymm2 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm2,%%ymm2 \n"
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ MEMOPMEM(vmovdqu,ymm2,0x00,1,2,1) // vmovdqu %%ymm2,(%1,%2)
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_uv), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_SPLITUVROW_AVX2
+
+#ifdef HAS_SPLITUVROW_SSE2
+void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrlw $0x8,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "movdqa %%xmm1,%%xmm3 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "psrlw $0x8,%%xmm2 \n"
+ "psrlw $0x8,%%xmm3 \n"
+ "packuswb %%xmm3,%%xmm2 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movdqu,xmm2,0x00,1,2,1) // movdqu %%xmm2,(%1,%2)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_uv), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_SPLITUVROW_SSE2
+
+#ifdef HAS_MERGEUVROW_AVX2
+void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) {
+ asm volatile (
+ "sub %0,%1 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ MEMOPREG(vmovdqu,0x00,0,1,1,ymm1) // vmovdqu (%0,%1,1),%%ymm1
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "vpunpcklbw %%ymm1,%%ymm0,%%ymm2 \n"
+ "vpunpckhbw %%ymm1,%%ymm0,%%ymm0 \n"
+ "vextractf128 $0x0,%%ymm2," MEMACCESS(2) " \n"
+ "vextractf128 $0x0,%%ymm0," MEMACCESS2(0x10,2) "\n"
+ "vextractf128 $0x1,%%ymm2," MEMACCESS2(0x20,2) "\n"
+ "vextractf128 $0x1,%%ymm0," MEMACCESS2(0x30,2) "\n"
+ "lea " MEMLEA(0x40,2) ",%2 \n"
+ "sub $0x20,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_u), // %0
+ "+r"(src_v), // %1
+ "+r"(dst_uv), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2"
+ );
+}
+#endif // HAS_MERGEUVROW_AVX2
+
+#ifdef HAS_MERGEUVROW_SSE2
+void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) {
+ asm volatile (
+ "sub %0,%1 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,1,1,xmm1) // movdqu (%0,%1,1),%%xmm1
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "punpcklbw %%xmm1,%%xmm0 \n"
+ "punpckhbw %%xmm1,%%xmm2 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "movdqu %%xmm2," MEMACCESS2(0x10,2) " \n"
+ "lea " MEMLEA(0x20,2) ",%2 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_u), // %0
+ "+r"(src_v), // %1
+ "+r"(dst_uv), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2"
+ );
+}
+#endif // HAS_MERGEUVROW_SSE2
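+
+// Scalar equivalent (illustrative helper, not in this file) of the
+// split/merge kernels above: interleaved UV is separated into planar U
+// and V planes, and MergeUV performs the inverse.
+static void MergeUVRow_Scalar(const uint8* src_u, const uint8* src_v,
+                              uint8* dst_uv, int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst_uv[x * 2 + 0] = src_u[x];
+    dst_uv[x * 2 + 1] = src_v[x];
+  }
+}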
+
+#ifdef HAS_COPYROW_SSE2
+void CopyRow_SSE2(const uint8* src, uint8* dst, int count) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(count) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1"
+ );
+}
+#endif // HAS_COPYROW_SSE2
+
+#ifdef HAS_COPYROW_AVX
+void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "vmovdqu %%ymm1," MEMACCESS2(0x20,1) " \n"
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "sub $0x40,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(count) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1"
+ );
+}
+#endif // HAS_COPYROW_AVX
+
+#ifdef HAS_COPYROW_ERMS
+// Copies width bytes; any width works (multiple of 1).
+void CopyRow_ERMS(const uint8* src, uint8* dst, int width) {
+ size_t width_tmp = (size_t)(width);
+ asm volatile (
+ "rep movsb " MEMMOVESTRING(0,1) " \n"
+ : "+S"(src), // %0
+ "+D"(dst), // %1
+ "+c"(width_tmp) // %2
+ :
+ : "memory", "cc"
+ );
+}
+#endif // HAS_COPYROW_ERMS
+
+#ifdef HAS_ARGBCOPYALPHAROW_SSE2
+// width in pixels
+void ARGBCopyAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
+ asm volatile (
+ "pcmpeqb %%xmm0,%%xmm0 \n"
+ "pslld $0x18,%%xmm0 \n"
+ "pcmpeqb %%xmm1,%%xmm1 \n"
+ "psrld $0x8,%%xmm1 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm4 \n"
+ "movdqu " MEMACCESS2(0x10,1) ",%%xmm5 \n"
+ "pand %%xmm0,%%xmm2 \n"
+ "pand %%xmm0,%%xmm3 \n"
+ "pand %%xmm1,%%xmm4 \n"
+ "pand %%xmm1,%%xmm5 \n"
+ "por %%xmm4,%%xmm2 \n"
+ "por %%xmm5,%%xmm3 \n"
+ "movdqu %%xmm2," MEMACCESS(1) " \n"
+ "movdqu %%xmm3," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_ARGBCOPYALPHAROW_SSE2
+
+#ifdef HAS_ARGBCOPYALPHAROW_AVX2
+// width in pixels
+void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
+ asm volatile (
+ "vpcmpeqb %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpsrld $0x8,%%ymm0,%%ymm0 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm1 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm2 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpblendvb %%ymm0," MEMACCESS(1) ",%%ymm1,%%ymm1 \n"
+ "vpblendvb %%ymm0," MEMACCESS2(0x20,1) ",%%ymm2,%%ymm2 \n"
+ "vmovdqu %%ymm1," MEMACCESS(1) " \n"
+ "vmovdqu %%ymm2," MEMACCESS2(0x20,1) " \n"
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2"
+ );
+}
+#endif // HAS_ARGBCOPYALPHAROW_AVX2
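+
+// Per-pixel effect (illustrative helper, not in this file) of the two
+// copy-alpha kernels above: the masks built with pslld $0x18 and
+// psrld $0x8 select the source alpha byte and the destination color
+// bytes respectively.
+static uint32 ARGBCopyAlpha_Scalar(uint32 src, uint32 dst) {
+  return (src & 0xff000000u) | (dst & 0x00ffffffu);
+}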
+
+#ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2
+// width in pixels
+void ARGBCopyYToAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
+ asm volatile (
+ "pcmpeqb %%xmm0,%%xmm0 \n"
+ "pslld $0x18,%%xmm0 \n"
+ "pcmpeqb %%xmm1,%%xmm1 \n"
+ "psrld $0x8,%%xmm1 \n"
+ LABELALIGN
+ "1: \n"
+ "movq " MEMACCESS(0) ",%%xmm2 \n"
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "punpcklbw %%xmm2,%%xmm2 \n"
+ "punpckhwd %%xmm2,%%xmm3 \n"
+ "punpcklwd %%xmm2,%%xmm2 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm4 \n"
+ "movdqu " MEMACCESS2(0x10,1) ",%%xmm5 \n"
+ "pand %%xmm0,%%xmm2 \n"
+ "pand %%xmm0,%%xmm3 \n"
+ "pand %%xmm1,%%xmm4 \n"
+ "pand %%xmm1,%%xmm5 \n"
+ "por %%xmm4,%%xmm2 \n"
+ "por %%xmm5,%%xmm3 \n"
+ "movdqu %%xmm2," MEMACCESS(1) " \n"
+ "movdqu %%xmm3," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_ARGBCOPYYTOALPHAROW_SSE2
+
+#ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2
+// width in pixels
+void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
+ asm volatile (
+ "vpcmpeqb %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpsrld $0x8,%%ymm0,%%ymm0 \n"
+ LABELALIGN
+ "1: \n"
+ "vpmovzxbd " MEMACCESS(0) ",%%ymm1 \n"
+ "vpmovzxbd " MEMACCESS2(0x8,0) ",%%ymm2 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "vpslld $0x18,%%ymm1,%%ymm1 \n"
+ "vpslld $0x18,%%ymm2,%%ymm2 \n"
+ "vpblendvb %%ymm0," MEMACCESS(1) ",%%ymm1,%%ymm1 \n"
+ "vpblendvb %%ymm0," MEMACCESS2(0x20,1) ",%%ymm2,%%ymm2 \n"
+ "vmovdqu %%ymm1," MEMACCESS(1) " \n"
+ "vmovdqu %%ymm2," MEMACCESS2(0x20,1) " \n"
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2"
+ );
+}
+#endif // HAS_ARGBCOPYYTOALPHAROW_AVX2
+
+#ifdef HAS_SETROW_X86
+void SetRow_X86(uint8* dst, uint8 v8, int width) {
+ size_t width_tmp = (size_t)(width >> 2);
+ const uint32 v32 = v8 * 0x01010101; // Duplicate byte to all bytes.
+ asm volatile (
+ "rep stosl " MEMSTORESTRING(eax,0) " \n"
+ : "+D"(dst), // %0
+ "+c"(width_tmp) // %1
+ : "a"(v32) // %2
+ : "memory", "cc");
+}
+
+void SetRow_ERMS(uint8* dst, uint8 v8, int width) {
+ size_t width_tmp = (size_t)(width);
+ asm volatile (
+ "rep stosb " MEMSTORESTRING(al,0) " \n"
+ : "+D"(dst), // %0
+ "+c"(width_tmp) // %1
+ : "a"(v8) // %2
+ : "memory", "cc");
+}
+
+void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int width) {
+ size_t width_tmp = (size_t)(width);
+ asm volatile (
+ "rep stosl " MEMSTORESTRING(eax,0) " \n"
+ : "+D"(dst_argb), // %0
+ "+c"(width_tmp) // %1
+ : "a"(v32) // %2
+ : "memory", "cc");
+}
+#endif // HAS_SETROW_X86
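+
+// Worked example of the byte broadcast in SetRow_X86 above: v8 = 0x5a
+// gives v32 = 0x5a * 0x01010101 = 0x5a5a5a5a, so every byte stored by
+// "rep stosl" equals v8. The ">> 2" converts the byte width to a dword
+// count; SetRow_ERMS covers arbitrary widths a byte at a time.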
+
+#ifdef HAS_YUY2TOYROW_SSE2
+void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrlw $0x8,%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrlw $0x8,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,0,4,1,xmm2) // movdqu (%0,%4,1),%%xmm2
+ MEMOPREG(movdqu,0x10,0,4,1,xmm3) // movdqu 0x10(%0,%4,1),%%xmm3
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "pavgb %%xmm3,%%xmm1 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm1 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movq,xmm1,0x00,1,2,1) // movq %%xmm1,(%1,%2)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ : "r"((intptr_t)(stride_yuy2)) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrlw $0x8,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm1 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movq,xmm1,0x00,1,2,1) // movq %%xmm1,(%1,%2)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void UYVYToYRow_SSE2(const uint8* src_uyvy, uint8* dst_y, int pix) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1"
+ );
+}
+
+void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrlw $0x8,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,0,4,1,xmm2) // movdqu (%0,%4,1),%%xmm2
+ MEMOPREG(movdqu,0x10,0,4,1,xmm3) // movdqu 0x10(%0,%4,1),%%xmm3
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "pavgb %%xmm3,%%xmm1 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm1 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movq,xmm1,0x00,1,2,1) // movq %%xmm1,(%1,%2)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ : "r"((intptr_t)(stride_uyvy)) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrlw $0x8,%%xmm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm1 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ MEMOPMEM(movq,xmm1,0x00,1,2,1) // movq %%xmm1,(%1,%2)
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm5"
+ );
+}
+#endif // HAS_YUY2TOYROW_SSE2
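+
+// Scalar sketch (illustrative helper, not in this file) of the packed-422
+// extraction above. YUY2 byte order is Y0 U Y1 V and UYVY is U Y0 V Y1;
+// the UVRow variants also average two rows with pavgb before extracting
+// chroma, while the UV422 variants take chroma from a single row.
+static void YUY2ToYRow_Scalar(const uint8* src_yuy2, uint8* dst_y,
+                              int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst_y[x] = src_yuy2[x * 2]; // even bytes are luma
+  }
+}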
+
+#ifdef HAS_YUY2TOYROW_AVX2
+void YUY2ToYRow_AVX2(const uint8* src_yuy2, uint8* dst_y, int pix) {
+ asm volatile (
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpand %%ymm5,%%ymm0,%%ymm0 \n"
+ "vpand %%ymm5,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ VMEMOPREG(vpavgb,0x00,0,4,1,ymm0,ymm0) // vpavgb (%0,%4,1),%%ymm0,%%ymm0
+ VMEMOPREG(vpavgb,0x20,0,4,1,ymm1,ymm1)
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
+ "vpsrlw $0x8,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpand %%ymm5,%%ymm0,%%ymm1 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
+ "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm1,%%ymm1 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vextractf128 $0x0,%%ymm1," MEMACCESS(1) " \n"
+ VEXTOPMEM(vextractf128,0,ymm0,0x00,1,2,1) // vextractf128 $0x0,%%ymm0,(%1,%2,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x20,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ : "r"((intptr_t)(stride_yuy2)) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
+ "vpsrlw $0x8,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpand %%ymm5,%%ymm0,%%ymm1 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
+ "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm1,%%ymm1 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vextractf128 $0x0,%%ymm1," MEMACCESS(1) " \n"
+ VEXTOPMEM(vextractf128,0,ymm0,0x00,1,2,1) // vextractf128 $0x0,%%ymm0,(%1,%2,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x20,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void UYVYToYRow_AVX2(const uint8* src_uyvy, uint8* dst_y, int pix) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
+ "vpsrlw $0x8,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
+ "sub %1,%2 \n"
+
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ VMEMOPREG(vpavgb,0x00,0,4,1,ymm0,ymm0) // vpavgb (%0,%4,1),%%ymm0,%%ymm0
+ VMEMOPREG(vpavgb,0x20,0,4,1,ymm1,ymm1)
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpand %%ymm5,%%ymm0,%%ymm0 \n"
+ "vpand %%ymm5,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpand %%ymm5,%%ymm0,%%ymm1 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
+ "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm1,%%ymm1 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vextractf128 $0x0,%%ymm1," MEMACCESS(1) " \n"
+ VEXTOPMEM(vextractf128,0,ymm0,0x00,1,2,1) // vextractf128 $0x0,%%ymm0,(%1,%2,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x20,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ : "r"((intptr_t)(stride_uyvy)) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpsrlw $0x8,%%ymm5,%%ymm5 \n"
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpand %%ymm5,%%ymm0,%%ymm0 \n"
+ "vpand %%ymm5,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpand %%ymm5,%%ymm0,%%ymm1 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
+ "vpackuswb %%ymm1,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm1,%%ymm1 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vextractf128 $0x0,%%ymm1," MEMACCESS(1) " \n"
+ VEXTOPMEM(vextractf128,0,ymm0,0x00,1,2,1) // vextractf128 $0x0,%%ymm0,(%1,%2,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x20,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm5"
+ );
+}
+#endif // HAS_YUY2TOYROW_AVX2
+
+#ifdef HAS_ARGBBLENDROW_SSE2
+// Blend 4 pixels at a time, with a single-pixel tail loop for leftovers.
+void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "pcmpeqb %%xmm7,%%xmm7 \n"
+ "psrlw $0xf,%%xmm7 \n"
+ "pcmpeqb %%xmm6,%%xmm6 \n"
+ "psrlw $0x8,%%xmm6 \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psllw $0x8,%%xmm5 \n"
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "pslld $0x18,%%xmm4 \n"
+ "sub $0x4,%3 \n"
+ "jl 49f \n"
+
+ // 4 pixel loop.
+ LABELALIGN
+ "41: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm3,%%xmm0 \n"
+ "pxor %%xmm4,%%xmm3 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm2 \n"
+ "psrlw $0x8,%%xmm3 \n"
+ "pshufhw $0xf5,%%xmm3,%%xmm3 \n"
+ "pshuflw $0xf5,%%xmm3,%%xmm3 \n"
+ "pand %%xmm6,%%xmm2 \n"
+ "paddw %%xmm7,%%xmm3 \n"
+ "pmullw %%xmm3,%%xmm2 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm1 \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "por %%xmm4,%%xmm0 \n"
+ "pmullw %%xmm3,%%xmm1 \n"
+ "psrlw $0x8,%%xmm2 \n"
+ "paddusb %%xmm2,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "paddusb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jge 41b \n"
+
+ "49: \n"
+ "add $0x3,%3 \n"
+ "jl 99f \n"
+
+ // 1 pixel loop.
+ "91: \n"
+ "movd " MEMACCESS(0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x4,0) ",%0 \n"
+ "movdqa %%xmm3,%%xmm0 \n"
+ "pxor %%xmm4,%%xmm3 \n"
+ "movd " MEMACCESS(1) ",%%xmm2 \n"
+ "psrlw $0x8,%%xmm3 \n"
+ "pshufhw $0xf5,%%xmm3,%%xmm3 \n"
+ "pshuflw $0xf5,%%xmm3,%%xmm3 \n"
+ "pand %%xmm6,%%xmm2 \n"
+ "paddw %%xmm7,%%xmm3 \n"
+ "pmullw %%xmm3,%%xmm2 \n"
+ "movd " MEMACCESS(1) ",%%xmm1 \n"
+ "lea " MEMLEA(0x4,1) ",%1 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "por %%xmm4,%%xmm0 \n"
+ "pmullw %%xmm3,%%xmm1 \n"
+ "psrlw $0x8,%%xmm2 \n"
+ "paddusb %%xmm2,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "paddusb %%xmm1,%%xmm0 \n"
+ "movd %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x4,2) ",%2 \n"
+ "sub $0x1,%3 \n"
+ "jge 91b \n"
+ "99: \n"
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBBLENDROW_SSE2
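+
+// Per-channel arithmetic (illustrative helper, not in this file) of the
+// blend loops above and below: pxor/psrlw/paddw form 256 - alpha, the
+// background is scaled by it, and por %%xmm4 forces result alpha to 255.
+static uint8 ARGBBlend_Scalar(uint8 fg, uint8 bg, uint8 a) {
+  uint32 v = (uint32)fg + (((256 - (uint32)a) * bg) >> 8);
+  return (uint8)(v > 255 ? 255 : v); // paddusb saturates
+}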
+
+#ifdef HAS_ARGBBLENDROW_SSSE3
+// Shuffle table for isolating alpha.
+static uvec8 kShuffleAlpha = {
+ 3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
+ 11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
+};
+
+// Blend 4 pixels at a time, with a single-pixel tail loop for leftovers.
+// Same as the SSE2 version, but replaces
+// psrlw xmm3, 8 // alpha
+// pshufhw xmm3, xmm3,0F5h // 8 alpha words
+// pshuflw xmm3, xmm3,0F5h
+// with:
+// pshufb xmm3, kShuffleAlpha // alpha
+
+void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "pcmpeqb %%xmm7,%%xmm7 \n"
+ "psrlw $0xf,%%xmm7 \n"
+ "pcmpeqb %%xmm6,%%xmm6 \n"
+ "psrlw $0x8,%%xmm6 \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psllw $0x8,%%xmm5 \n"
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "pslld $0x18,%%xmm4 \n"
+ "sub $0x4,%3 \n"
+ "jl 49f \n"
+
+ // 4 pixel loop.
+ LABELALIGN
+ "40: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm3,%%xmm0 \n"
+ "pxor %%xmm4,%%xmm3 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm2 \n"
+ "pshufb %4,%%xmm3 \n"
+ "pand %%xmm6,%%xmm2 \n"
+ "paddw %%xmm7,%%xmm3 \n"
+ "pmullw %%xmm3,%%xmm2 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm1 \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "por %%xmm4,%%xmm0 \n"
+ "pmullw %%xmm3,%%xmm1 \n"
+ "psrlw $0x8,%%xmm2 \n"
+ "paddusb %%xmm2,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "paddusb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jge 40b \n"
+
+ "49: \n"
+ "add $0x3,%3 \n"
+ "jl 99f \n"
+
+ // 1 pixel loop.
+ "91: \n"
+ "movd " MEMACCESS(0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x4,0) ",%0 \n"
+ "movdqa %%xmm3,%%xmm0 \n"
+ "pxor %%xmm4,%%xmm3 \n"
+ "movd " MEMACCESS(1) ",%%xmm2 \n"
+ "pshufb %4,%%xmm3 \n"
+ "pand %%xmm6,%%xmm2 \n"
+ "paddw %%xmm7,%%xmm3 \n"
+ "pmullw %%xmm3,%%xmm2 \n"
+ "movd " MEMACCESS(1) ",%%xmm1 \n"
+ "lea " MEMLEA(0x4,1) ",%1 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "por %%xmm4,%%xmm0 \n"
+ "pmullw %%xmm3,%%xmm1 \n"
+ "psrlw $0x8,%%xmm2 \n"
+ "paddusb %%xmm2,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "paddusb %%xmm1,%%xmm0 \n"
+ "movd %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x4,2) ",%2 \n"
+ "sub $0x1,%3 \n"
+ "jge 91b \n"
+ "99: \n"
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ : "m"(kShuffleAlpha) // %4
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBBLENDROW_SSSE3
+
+#ifdef HAS_ARGBATTENUATEROW_SSE2
+// Attenuate 4 pixels at a time.
+void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) {
+ asm volatile (
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "pslld $0x18,%%xmm4 \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrld $0x8,%%xmm5 \n"
+
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ "pshufhw $0xff,%%xmm0,%%xmm2 \n"
+ "pshuflw $0xff,%%xmm2,%%xmm2 \n"
+ "pmulhuw %%xmm2,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm1 \n"
+ "punpckhbw %%xmm1,%%xmm1 \n"
+ "pshufhw $0xff,%%xmm1,%%xmm2 \n"
+ "pshuflw $0xff,%%xmm2,%%xmm2 \n"
+ "pmulhuw %%xmm2,%%xmm1 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm2 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "pand %%xmm4,%%xmm2 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "por %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_ARGBATTENUATEROW_SSE2
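+
+// Per-channel arithmetic (illustrative helper, not in this file) of the
+// attenuate kernels above and below: punpcklbw duplicates each byte into
+// c*0x0101 and pmulhuw keeps the high 16 bits, giving roughly c * a / 255
+// for the color channels; the alpha channel is copied through unchanged.
+static uint8 ARGBAttenuate_Scalar(uint8 c, uint8 a) {
+  return (uint8)((((uint32)c * 0x0101) * ((uint32)a * 0x0101)) >> 24);
+}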
+
+#ifdef HAS_ARGBATTENUATEROW_SSSE3
+// Shuffle table duplicating alpha
+static uvec8 kShuffleAlpha0 = {
+ 3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u, 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u
+};
+static uvec8 kShuffleAlpha1 = {
+ 11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
+ 15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u
+};
+// Attenuate 4 pixels at a time.
+void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
+ asm volatile (
+ "pcmpeqb %%xmm3,%%xmm3 \n"
+ "pslld $0x18,%%xmm3 \n"
+ "movdqa %3,%%xmm4 \n"
+ "movdqa %4,%%xmm5 \n"
+
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "pshufb %%xmm4,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm1 \n"
+ "punpcklbw %%xmm1,%%xmm1 \n"
+ "pmulhuw %%xmm1,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm1 \n"
+ "pshufb %%xmm5,%%xmm1 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm2 \n"
+ "punpckhbw %%xmm2,%%xmm2 \n"
+ "pmulhuw %%xmm2,%%xmm1 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm2 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "pand %%xmm3,%%xmm2 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "por %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "m"(kShuffleAlpha0), // %3
+ "m"(kShuffleAlpha1) // %4
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_ARGBATTENUATEROW_SSSE3
+
+#ifdef HAS_ARGBATTENUATEROW_AVX2
+// Shuffle table duplicating alpha.
+static const uvec8 kShuffleAlpha_AVX2 = {
+ 6u, 7u, 6u, 7u, 6u, 7u, 128u, 128u, 14u, 15u, 14u, 15u, 14u, 15u, 128u, 128u
+};
+// Attenuate 8 pixels at a time.
+void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
+ asm volatile (
+ "vbroadcastf128 %3,%%ymm4 \n"
+ "vpcmpeqb %%ymm5,%%ymm5,%%ymm5 \n"
+ "vpslld $0x18,%%ymm5,%%ymm5 \n"
+ "sub %0,%1 \n"
+
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm6 \n"
+ "vpunpcklbw %%ymm6,%%ymm6,%%ymm0 \n"
+ "vpunpckhbw %%ymm6,%%ymm6,%%ymm1 \n"
+ "vpshufb %%ymm4,%%ymm0,%%ymm2 \n"
+ "vpshufb %%ymm4,%%ymm1,%%ymm3 \n"
+ "vpmulhuw %%ymm2,%%ymm0,%%ymm0 \n"
+ "vpmulhuw %%ymm3,%%ymm1,%%ymm1 \n"
+ "vpand %%ymm5,%%ymm6,%%ymm6 \n"
+ "vpsrlw $0x8,%%ymm0,%%ymm0 \n"
+ "vpsrlw $0x8,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpor %%ymm6,%%ymm0,%%ymm0 \n"
+ MEMOPMEM(vmovdqu,ymm0,0x00,0,1,1) // vmovdqu %%ymm0,(%0,%1)
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "m"(kShuffleAlpha_AVX2) // %3
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+#endif // HAS_ARGBATTENUATEROW_AVX2
+
+#ifdef HAS_ARGBUNATTENUATEROW_SSE2
+// Unattenuate 4 pixels at a time.
+void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
+ int width) {
+ uintptr_t alpha = 0;
+ asm volatile (
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movzb " MEMACCESS2(0x03,0) ",%3 \n"
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ MEMOPREG(movd,0x00,4,3,4,xmm2) // movd 0x0(%4,%3,4),%%xmm2
+ "movzb " MEMACCESS2(0x07,0) ",%3 \n"
+ MEMOPREG(movd,0x00,4,3,4,xmm3) // movd 0x0(%4,%3,4),%%xmm3
+ "pshuflw $0x40,%%xmm2,%%xmm2 \n"
+ "pshuflw $0x40,%%xmm3,%%xmm3 \n"
+ "movlhps %%xmm3,%%xmm2 \n"
+ "pmulhuw %%xmm2,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm1 \n"
+ "movzb " MEMACCESS2(0x0b,0) ",%3 \n"
+ "punpckhbw %%xmm1,%%xmm1 \n"
+ MEMOPREG(movd,0x00,4,3,4,xmm2) // movd 0x0(%4,%3,4),%%xmm2
+ "movzb " MEMACCESS2(0x0f,0) ",%3 \n"
+ MEMOPREG(movd,0x00,4,3,4,xmm3) // movd 0x0(%4,%3,4),%%xmm3
+ "pshuflw $0x40,%%xmm2,%%xmm2 \n"
+ "pshuflw $0x40,%%xmm3,%%xmm3 \n"
+ "movlhps %%xmm3,%%xmm2 \n"
+ "pmulhuw %%xmm2,%%xmm1 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width), // %2
+ "+r"(alpha) // %3
+ : "r"(fixed_invtbl8) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_ARGBUNATTENUATEROW_SSE2
+
+#ifdef HAS_ARGBUNATTENUATEROW_AVX2
+// Shuffle table duplicating alpha.
+static const uvec8 kUnattenShuffleAlpha_AVX2 = {
+ 0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u
+};
+// Unattenuate 8 pixels at a time.
+void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
+ int width) {
+ uintptr_t alpha = 0;
+ asm volatile (
+ "sub %0,%1 \n"
+ "vbroadcastf128 %5,%%ymm5 \n"
+
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ // Replaces VPGATHER: fetch the eight per-pixel reciprocal table entries with scalar loads.
+ "movzb " MEMACCESS2(0x03,0) ",%3 \n"
+ MEMOPREG(vmovd,0x00,4,3,4,xmm0) // vmovd 0x0(%4,%3,4),%%xmm0
+ "movzb " MEMACCESS2(0x07,0) ",%3 \n"
+ MEMOPREG(vmovd,0x00,4,3,4,xmm1) // vmovd 0x0(%4,%3,4),%%xmm1
+ "movzb " MEMACCESS2(0x0b,0) ",%3 \n"
+ "vpunpckldq %%xmm1,%%xmm0,%%xmm6 \n"
+ MEMOPREG(vmovd,0x00,4,3,4,xmm2) // vmovd 0x0(%4,%3,4),%%xmm2
+ "movzb " MEMACCESS2(0x0f,0) ",%3 \n"
+ MEMOPREG(vmovd,0x00,4,3,4,xmm3) // vmovd 0x0(%4,%3,4),%%xmm3
+ "movzb " MEMACCESS2(0x13,0) ",%3 \n"
+ "vpunpckldq %%xmm3,%%xmm2,%%xmm7 \n"
+ MEMOPREG(vmovd,0x00,4,3,4,xmm0) // vmovd 0x0(%4,%3,4),%%xmm0
+ "movzb " MEMACCESS2(0x17,0) ",%3 \n"
+ MEMOPREG(vmovd,0x00,4,3,4,xmm1) // vmovd 0x0(%4,%3,4),%%xmm1
+ "movzb " MEMACCESS2(0x1b,0) ",%3 \n"
+ "vpunpckldq %%xmm1,%%xmm0,%%xmm0 \n"
+ MEMOPREG(vmovd,0x00,4,3,4,xmm2) // vmovd 0x0(%4,%3,4),%%xmm2
+ "movzb " MEMACCESS2(0x1f,0) ",%3 \n"
+ MEMOPREG(vmovd,0x00,4,3,4,xmm3) // vmovd 0x0(%4,%3,4),%%xmm3
+ "vpunpckldq %%xmm3,%%xmm2,%%xmm2 \n"
+ "vpunpcklqdq %%xmm7,%%xmm6,%%xmm3 \n"
+ "vpunpcklqdq %%xmm2,%%xmm0,%%xmm0 \n"
+ "vinserti128 $0x1,%%xmm0,%%ymm3,%%ymm3 \n"
+ // end of VPGATHER emulation
+
+ "vmovdqu " MEMACCESS(0) ",%%ymm6 \n"
+ "vpunpcklbw %%ymm6,%%ymm6,%%ymm0 \n"
+ "vpunpckhbw %%ymm6,%%ymm6,%%ymm1 \n"
+ "vpunpcklwd %%ymm3,%%ymm3,%%ymm2 \n"
+ "vpunpckhwd %%ymm3,%%ymm3,%%ymm3 \n"
+ "vpshufb %%ymm5,%%ymm2,%%ymm2 \n"
+ "vpshufb %%ymm5,%%ymm3,%%ymm3 \n"
+ "vpmulhuw %%ymm2,%%ymm0,%%ymm0 \n"
+ "vpmulhuw %%ymm3,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ MEMOPMEM(vmovdqu,ymm0,0x00,0,1,1) // vmovdqu %%ymm0,(%0,%1)
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width), // %2
+ "+r"(alpha) // %3
+ : "r"(fixed_invtbl8), // %4
+ "m"(kUnattenShuffleAlpha_AVX2) // %5
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBUNATTENUATEROW_AVX2
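+
+// Scalar sketch, for the color channels, of the two unattenuate kernels
+// above, assuming fixed_invtbl8 (defined elsewhere in libyuv) carries a
+// fixed-point reciprocal of alpha, roughly 65536 / a, in its low 16 bits;
+// the table contents are an assumption here, not restated from this file.
+static uint8 ARGBUnattenuate_Scalar(uint8 c, uint8 a,
+                                    const uint32* invtbl) {
+  uint32 ia = invtbl[a] & 0xffff; // ~65536 / a (assumed)
+  uint32 v = (((uint32)c * 0x0101) * ia) >> 16; // pmulhuw on c*0x0101
+  return (uint8)(v > 255 ? 255 : v); // packuswb saturates
+}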
+
+#ifdef HAS_ARGBGRAYROW_SSSE3
+// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels
+void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
+ asm volatile (
+ "movdqa %3,%%xmm4 \n"
+ "movdqa %4,%%xmm5 \n"
+
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm0 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "phaddw %%xmm1,%%xmm0 \n"
+ "paddw %%xmm5,%%xmm0 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm3 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "psrld $0x18,%%xmm2 \n"
+ "psrld $0x18,%%xmm3 \n"
+ "packuswb %%xmm3,%%xmm2 \n"
+ "packuswb %%xmm2,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm3 \n"
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ "punpcklbw %%xmm2,%%xmm3 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklwd %%xmm3,%%xmm0 \n"
+ "punpckhwd %%xmm3,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "m"(kARGBToYJ), // %3
+ "m"(kAddYJ64) // %4
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_ARGBGRAYROW_SSSE3
+
+#ifdef HAS_ARGBSEPIAROW_SSSE3
+// b = (r * 35 + g * 68 + b * 17) >> 7
+// g = (r * 45 + g * 88 + b * 22) >> 7
+// r = (r * 50 + g * 98 + b * 24) >> 7
+// Constant for ARGB color to sepia tone
+static vec8 kARGBToSepiaB = {
+ 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0
+};
+
+static vec8 kARGBToSepiaG = {
+ 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0
+};
+
+static vec8 kARGBToSepiaR = {
+ 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0
+};
+
+// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
+void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
+ asm volatile (
+ "movdqa %2,%%xmm2 \n"
+ "movdqa %3,%%xmm3 \n"
+ "movdqa %4,%%xmm4 \n"
+
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm6 \n"
+ "pmaddubsw %%xmm2,%%xmm0 \n"
+ "pmaddubsw %%xmm2,%%xmm6 \n"
+ "phaddw %%xmm6,%%xmm0 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm5 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm5 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "phaddw %%xmm1,%%xmm5 \n"
+ "psrlw $0x7,%%xmm5 \n"
+ "packuswb %%xmm5,%%xmm5 \n"
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm5 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm5 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "phaddw %%xmm1,%%xmm5 \n"
+ "psrlw $0x7,%%xmm5 \n"
+ "packuswb %%xmm5,%%xmm5 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm6 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "psrld $0x18,%%xmm6 \n"
+ "psrld $0x18,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm6 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "punpcklbw %%xmm6,%%xmm5 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklwd %%xmm5,%%xmm0 \n"
+ "punpckhwd %%xmm5,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(0) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,0) " \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "sub $0x8,%1 \n"
+ "jg 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(width) // %1
+ : "m"(kARGBToSepiaB), // %2
+ "m"(kARGBToSepiaG), // %3
+ "m"(kARGBToSepiaR) // %4
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+#endif // HAS_ARGBSEPIAROW_SSSE3
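+
+// Worked example of the sepia weights above for a gray pixel with
+// r = g = b = 100:
+//   b' = (100*35 + 100*68 + 100*17) >> 7 = 12000 >> 7 = 93
+//   g' = (100*45 + 100*88 + 100*22) >> 7 = 15500 >> 7 = 121
+//   r' = (100*50 + 100*98 + 100*24) >> 7 = 17200 >> 7 = 134
+// pmaddubsw and phaddw evaluate these dot products 8 pixels at a time.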
+
+#ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
+// Transform 8 ARGB pixels (32 bytes) with color matrix.
+// Same as Sepia except matrix is provided.
+void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width) {
+ asm volatile (
+ "movdqu " MEMACCESS(3) ",%%xmm5 \n"
+ "pshufd $0x00,%%xmm5,%%xmm2 \n"
+ "pshufd $0x55,%%xmm5,%%xmm3 \n"
+ "pshufd $0xaa,%%xmm5,%%xmm4 \n"
+ "pshufd $0xff,%%xmm5,%%xmm5 \n"
+
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm7 \n"
+ "pmaddubsw %%xmm2,%%xmm0 \n"
+ "pmaddubsw %%xmm2,%%xmm7 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm6 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "pmaddubsw %%xmm3,%%xmm6 \n"
+ "pmaddubsw %%xmm3,%%xmm1 \n"
+ "phaddsw %%xmm7,%%xmm0 \n"
+ "phaddsw %%xmm1,%%xmm6 \n"
+ "psraw $0x6,%%xmm0 \n"
+ "psraw $0x6,%%xmm6 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "punpcklbw %%xmm6,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm7 \n"
+ "pmaddubsw %%xmm4,%%xmm1 \n"
+ "pmaddubsw %%xmm4,%%xmm7 \n"
+ "phaddsw %%xmm7,%%xmm1 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm6 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm7 \n"
+ "pmaddubsw %%xmm5,%%xmm6 \n"
+ "pmaddubsw %%xmm5,%%xmm7 \n"
+ "phaddsw %%xmm7,%%xmm6 \n"
+ "psraw $0x6,%%xmm1 \n"
+ "psraw $0x6,%%xmm6 \n"
+ "packuswb %%xmm1,%%xmm1 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "punpcklbw %%xmm6,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm6 \n"
+ "punpcklwd %%xmm1,%%xmm0 \n"
+ "punpckhwd %%xmm1,%%xmm6 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "movdqu %%xmm6," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "r"(matrix_argb) // %3
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBCOLORMATRIXROW_SSSE3
+
+#ifdef HAS_ARGBQUANTIZEROW_SSE2
+// Quantize 4 ARGB pixels (16 bytes).
+void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width) {
+ asm volatile (
+ "movd %2,%%xmm2 \n"
+ "movd %3,%%xmm3 \n"
+ "movd %4,%%xmm4 \n"
+ "pshuflw $0x40,%%xmm2,%%xmm2 \n"
+ "pshufd $0x44,%%xmm2,%%xmm2 \n"
+ "pshuflw $0x40,%%xmm3,%%xmm3 \n"
+ "pshufd $0x44,%%xmm3,%%xmm3 \n"
+ "pshuflw $0x40,%%xmm4,%%xmm4 \n"
+ "pshufd $0x44,%%xmm4,%%xmm4 \n"
+ "pxor %%xmm5,%%xmm5 \n"
+ "pcmpeqb %%xmm6,%%xmm6 \n"
+ "pslld $0x18,%%xmm6 \n"
+
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "pmulhuw %%xmm2,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm1 \n"
+ "punpckhbw %%xmm5,%%xmm1 \n"
+ "pmulhuw %%xmm2,%%xmm1 \n"
+ "pmullw %%xmm3,%%xmm0 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm7 \n"
+ "pmullw %%xmm3,%%xmm1 \n"
+ "pand %%xmm6,%%xmm7 \n"
+ "paddw %%xmm4,%%xmm0 \n"
+ "paddw %%xmm4,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "por %%xmm7,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(0) " \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "sub $0x4,%1 \n"
+ "jg 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(width) // %1
+ : "r"(scale), // %2
+ "r"(interval_size), // %3
+ "r"(interval_offset) // %4
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBQUANTIZEROW_SSE2
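+
+// Per-channel arithmetic (illustrative helper, not in this file) of the
+// quantize loop above; the alpha byte is preserved via the 0xff000000
+// mask kept in xmm6.
+static uint8 ARGBQuantize_Scalar(uint8 v, int scale, int interval_size,
+                                 int interval_offset) {
+  int u = ((v * scale) >> 16) * interval_size + interval_offset;
+  return (uint8)(u > 255 ? 255 : u); // packuswb saturates
+}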
+
+#ifdef HAS_ARGBSHADEROW_SSE2
+// Shade 4 pixels at a time by specified value.
+void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
+ uint32 value) {
+ asm volatile (
+ "movd %3,%%xmm2 \n"
+ "punpcklbw %%xmm2,%%xmm2 \n"
+ "punpcklqdq %%xmm2,%%xmm2 \n"
+
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ "punpckhbw %%xmm1,%%xmm1 \n"
+ "pmulhuw %%xmm2,%%xmm0 \n"
+ "pmulhuw %%xmm2,%%xmm1 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "r"(value) // %3
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2"
+ );
+}
+#endif // HAS_ARGBSHADEROW_SSE2
+
+#ifdef HAS_ARGBMULTIPLYROW_SSE2
+// Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
+void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "pxor %%xmm5,%%xmm5 \n"
+
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm2 \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "movdqu %%xmm0,%%xmm1 \n"
+ "movdqu %%xmm2,%%xmm3 \n"
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ "punpckhbw %%xmm1,%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm2 \n"
+ "punpckhbw %%xmm5,%%xmm3 \n"
+ "pmulhuw %%xmm2,%%xmm0 \n"
+ "pmulhuw %%xmm3,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_ARGBMULTIPLYROW_SSE2
+
+#ifdef HAS_ARGBMULTIPLYROW_AVX2
+// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
+void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "vpxor %%ymm5,%%ymm5,%%ymm5 \n"
+
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "vmovdqu " MEMACCESS(1) ",%%ymm3 \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "vpunpcklbw %%ymm1,%%ymm1,%%ymm0 \n"
+ "vpunpckhbw %%ymm1,%%ymm1,%%ymm1 \n"
+ "vpunpcklbw %%ymm5,%%ymm3,%%ymm2 \n"
+ "vpunpckhbw %%ymm5,%%ymm3,%%ymm3 \n"
+ "vpmulhuw %%ymm2,%%ymm0,%%ymm0 \n"
+ "vpmulhuw %%ymm3,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vmovdqu %%ymm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x20,2) ",%2 \n"
+ "sub $0x8,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc"
+#if defined(__AVX2__)
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+#endif
+ );
+}
+#endif // HAS_ARGBMULTIPLYROW_AVX2
+
+#ifdef HAS_ARGBADDROW_SSE2
+// Add 2 rows of ARGB pixels together, 4 pixels at a time.
+void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm1 \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "paddusb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1"
+ );
+}
+#endif // HAS_ARGBADDROW_SSE2
+
+#ifdef HAS_ARGBADDROW_AVX2
+// Add 2 rows of ARGB pixels together, 8 pixels at a time.
+void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "vpaddusb " MEMACCESS(1) ",%%ymm0,%%ymm0 \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "vmovdqu %%ymm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x20,2) ",%2 \n"
+ "sub $0x8,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc"
+ , "xmm0"
+ );
+}
+#endif // HAS_ARGBADDROW_AVX2
+
+#ifdef HAS_ARGBSUBTRACTROW_SSE2
+// Subtract 2 rows of ARGB pixels, 4 pixels at a time.
+void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqu " MEMACCESS(1) ",%%xmm1 \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "psubusb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1"
+ );
+}
+#endif // HAS_ARGBSUBTRACTROW_SSE2
+
+#ifdef HAS_ARGBSUBTRACTROW_AVX2
+// Subtract 2 rows of ARGB pixels, 8 pixels at a time.
+void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "vpsubusb " MEMACCESS(1) ",%%ymm0,%%ymm0 \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "vmovdqu %%ymm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x20,2) ",%2 \n"
+ "sub $0x8,%3 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc"
+ , "xmm0"
+ );
+}
+#endif // HAS_ARGBSUBTRACTROW_AVX2
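+
+// The four add/subtract kernels above reduce to byte-wise saturating
+// arithmetic (paddusb/psubusb and their VEX forms); an illustrative
+// scalar form of the add:
+static uint8 AddU8_Scalar(uint8 a, uint8 b) {
+  int v = a + b;
+  return (uint8)(v > 255 ? 255 : v); // paddusb clamps at 255
+}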
+
+#ifdef HAS_SOBELXROW_SSE2
+// SobelX as a matrix is
+// -1 0 1
+// -2 0 2
+// -1 0 1
+void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
+ const uint8* src_y2, uint8* dst_sobelx, int width) {
+ asm volatile (
+ "sub %0,%1 \n"
+ "sub %0,%2 \n"
+ "sub %0,%3 \n"
+ "pxor %%xmm5,%%xmm5 \n"
+
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movq " MEMACCESS(0) ",%%xmm0 \n"
+ "movq " MEMACCESS2(0x2,0) ",%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "punpcklbw %%xmm5,%%xmm1 \n"
+ "psubw %%xmm1,%%xmm0 \n"
+ MEMOPREG(movq,0x00,0,1,1,xmm1) // movq (%0,%1,1),%%xmm1
+ MEMOPREG(movq,0x02,0,1,1,xmm2) // movq 0x2(%0,%1,1),%%xmm2
+ "punpcklbw %%xmm5,%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm2 \n"
+ "psubw %%xmm2,%%xmm1 \n"
+ MEMOPREG(movq,0x00,0,2,1,xmm2) // movq (%0,%2,1),%%xmm2
+ MEMOPREG(movq,0x02,0,2,1,xmm3) // movq 0x2(%0,%2,1),%%xmm3
+ "punpcklbw %%xmm5,%%xmm2 \n"
+ "punpcklbw %%xmm5,%%xmm3 \n"
+ "psubw %%xmm3,%%xmm2 \n"
+ "paddw %%xmm2,%%xmm0 \n"
+ "paddw %%xmm1,%%xmm0 \n"
+ "paddw %%xmm1,%%xmm0 \n"
+ "pxor %%xmm1,%%xmm1 \n"
+ "psubw %%xmm0,%%xmm1 \n"
+ "pmaxsw %%xmm1,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ MEMOPMEM(movq,xmm0,0x00,0,3,1) // movq %%xmm0,(%0,%3,1)
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "sub $0x8,%4 \n"
+ "jg 1b \n"
+ : "+r"(src_y0), // %0
+ "+r"(src_y1), // %1
+ "+r"(src_y2), // %2
+ "+r"(dst_sobelx), // %3
+ "+r"(width) // %4
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_SOBELXROW_SSE2
+
+#ifdef HAS_SOBELYROW_SSE2
+// SobelY as a matrix is
+// -1 -2 -1
+// 0 0 0
+// 1 2 1
+void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width) {
+ asm volatile (
+ "sub %0,%1 \n"
+ "sub %0,%2 \n"
+ "pxor %%xmm5,%%xmm5 \n"
+
+ // 8 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movq " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movq,0x00,0,1,1,xmm1) // movq (%0,%1,1),%%xmm1
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "punpcklbw %%xmm5,%%xmm1 \n"
+ "psubw %%xmm1,%%xmm0 \n"
+ "movq " MEMACCESS2(0x1,0) ",%%xmm1 \n"
+ MEMOPREG(movq,0x01,0,1,1,xmm2) // movq 0x1(%0,%1,1),%%xmm2
+ "punpcklbw %%xmm5,%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm2 \n"
+ "psubw %%xmm2,%%xmm1 \n"
+ "movq " MEMACCESS2(0x2,0) ",%%xmm2 \n"
+ MEMOPREG(movq,0x02,0,1,1,xmm3) // movq 0x2(%0,%1,1),%%xmm3
+ "punpcklbw %%xmm5,%%xmm2 \n"
+ "punpcklbw %%xmm5,%%xmm3 \n"
+ "psubw %%xmm3,%%xmm2 \n"
+ "paddw %%xmm2,%%xmm0 \n"
+ "paddw %%xmm1,%%xmm0 \n"
+ "paddw %%xmm1,%%xmm0 \n"
+ "pxor %%xmm1,%%xmm1 \n"
+ "psubw %%xmm0,%%xmm1 \n"
+ "pmaxsw %%xmm1,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ MEMOPMEM(movq,xmm0,0x00,0,2,1) // movq %%xmm0,(%0,%2,1)
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "sub $0x8,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_y0), // %0
+ "+r"(src_y1), // %1
+ "+r"(dst_sobely), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_SOBELYROW_SSE2
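+
+// A scalar sketch (not compiled) of the SobelX/SobelY rows above: apply the
+// 3x3 kernel as three weighted differences, take the absolute value and
+// clamp to 255, as the psubw/pmaxsw/packuswb sequence does 8 pixels per
+// iteration. SobelXRow_Sketch is a hypothetical name; SobelY differs only in
+// which direction the differences run.
+#if 0
+static void SobelXRow_Sketch(const uint8* src_y0, const uint8* src_y1,
+                             const uint8* src_y2, uint8* dst_sobelx,
+                             int width) {
+  int i;
+  for (i = 0; i < width; ++i) {
+    int a = src_y0[i] - src_y0[i + 2];  // top row, weight 1.
+    int b = src_y1[i] - src_y1[i + 2];  // middle row, weight 2.
+    int c = src_y2[i] - src_y2[i + 2];  // bottom row, weight 1.
+    int sobel = a + b * 2 + c;
+    if (sobel < 0) sobel = -sobel;      // pmaxsw(x, -x) == |x|.
+    dst_sobelx[i] = (uint8)(sobel > 255 ? 255 : sobel);
+  }
+}
+#endif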
+
+#ifdef HAS_SOBELROW_SSE2
+// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
+// A = 255
+// R = Sobel
+// G = Sobel
+// B = Sobel
+void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "sub %0,%1 \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "pslld $0x18,%%xmm5 \n"
+
+ // 16 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,1,1,xmm1) // movdqu (%0,%1,1),%%xmm1
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "paddusb %%xmm1,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "punpcklbw %%xmm0,%%xmm2 \n"
+ "punpckhbw %%xmm0,%%xmm0 \n"
+ "movdqa %%xmm2,%%xmm1 \n"
+ "punpcklwd %%xmm2,%%xmm1 \n"
+ "punpckhwd %%xmm2,%%xmm2 \n"
+ "por %%xmm5,%%xmm1 \n"
+ "por %%xmm5,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm3 \n"
+ "punpcklwd %%xmm0,%%xmm3 \n"
+ "punpckhwd %%xmm0,%%xmm0 \n"
+ "por %%xmm5,%%xmm3 \n"
+ "por %%xmm5,%%xmm0 \n"
+ "movdqu %%xmm1," MEMACCESS(2) " \n"
+ "movdqu %%xmm2," MEMACCESS2(0x10,2) " \n"
+ "movdqu %%xmm3," MEMACCESS2(0x20,2) " \n"
+ "movdqu %%xmm0," MEMACCESS2(0x30,2) " \n"
+ "lea " MEMLEA(0x40,2) ",%2 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+#endif // HAS_SOBELROW_SSE2
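+
+// A scalar sketch (not compiled) of the packing above: the two Sobel planes
+// are added with unsigned saturation (paddusb) and the result is replicated
+// into B, G and R, with alpha forced to 255 by the 0xff000000 mask built in
+// xmm5. SobelRow_Sketch is a hypothetical name.
+#if 0
+static void SobelRow_Sketch(const uint8* src_sobelx, const uint8* src_sobely,
+                            uint8* dst_argb, int width) {
+  int i;
+  for (i = 0; i < width; ++i) {
+    int s = src_sobelx[i] + src_sobely[i];
+    uint8 v = (uint8)(s > 255 ? 255 : s);  // paddusb saturation.
+    dst_argb[0] = v;    // B
+    dst_argb[1] = v;    // G
+    dst_argb[2] = v;    // R
+    dst_argb[3] = 255;  // A
+    dst_argb += 4;
+  }
+}
+#endif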
+
+#ifdef HAS_SOBELTOPLANEROW_SSE2
+// Adds Sobel X and Sobel Y and stores Sobel into a plane.
+void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width) {
+ asm volatile (
+ "sub %0,%1 \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "pslld $0x18,%%xmm5 \n"
+
+ // 16 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,1,1,xmm1) // movdqu (%0,%1,1),%%xmm1
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "paddusb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_y), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1"
+ );
+}
+#endif // HAS_SOBELTOPLANEROW_SSE2
+
+#ifdef HAS_SOBELXYROW_SSE2
+// Mixes Sobel X, Sobel Y and Sobel into ARGB.
+// A = 255
+// R = Sobel X
+// G = Sobel
+// B = Sobel Y
+void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "sub %0,%1 \n"
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+
+ // 16 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,1,1,xmm1) // movdqu (%0,%1,1),%%xmm1
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "paddusb %%xmm1,%%xmm2 \n"
+ "movdqa %%xmm0,%%xmm3 \n"
+ "punpcklbw %%xmm5,%%xmm3 \n"
+ "punpckhbw %%xmm5,%%xmm0 \n"
+ "movdqa %%xmm1,%%xmm4 \n"
+ "punpcklbw %%xmm2,%%xmm4 \n"
+ "punpckhbw %%xmm2,%%xmm1 \n"
+ "movdqa %%xmm4,%%xmm6 \n"
+ "punpcklwd %%xmm3,%%xmm6 \n"
+ "punpckhwd %%xmm3,%%xmm4 \n"
+ "movdqa %%xmm1,%%xmm7 \n"
+ "punpcklwd %%xmm0,%%xmm7 \n"
+ "punpckhwd %%xmm0,%%xmm1 \n"
+ "movdqu %%xmm6," MEMACCESS(2) " \n"
+ "movdqu %%xmm4," MEMACCESS2(0x10,2) " \n"
+ "movdqu %%xmm7," MEMACCESS2(0x20,2) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x30,2) " \n"
+ "lea " MEMLEA(0x40,2) ",%2 \n"
+ "sub $0x10,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_SOBELXYROW_SSE2
+
+#ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
+// Creates a table of cumulative sums where each value is a sum of all values
+// above and to the left of the value, inclusive of the value.
+void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
+ const int32* previous_cumsum, int width) {
+ asm volatile (
+ "pxor %%xmm0,%%xmm0 \n"
+ "pxor %%xmm1,%%xmm1 \n"
+ "sub $0x4,%3 \n"
+ "jl 49f \n"
+ "test $0xf,%1 \n"
+ "jne 49f \n"
+
+ // 4 pixel loop
+ LABELALIGN
+ "40: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm2 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm2,%%xmm4 \n"
+ "punpcklbw %%xmm1,%%xmm2 \n"
+ "movdqa %%xmm2,%%xmm3 \n"
+ "punpcklwd %%xmm1,%%xmm2 \n"
+ "punpckhwd %%xmm1,%%xmm3 \n"
+ "punpckhbw %%xmm1,%%xmm4 \n"
+ "movdqa %%xmm4,%%xmm5 \n"
+ "punpcklwd %%xmm1,%%xmm4 \n"
+ "punpckhwd %%xmm1,%%xmm5 \n"
+ "paddd %%xmm2,%%xmm0 \n"
+ "movdqu " MEMACCESS(2) ",%%xmm2 \n"
+ "paddd %%xmm0,%%xmm2 \n"
+ "paddd %%xmm3,%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,2) ",%%xmm3 \n"
+ "paddd %%xmm0,%%xmm3 \n"
+ "paddd %%xmm4,%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x20,2) ",%%xmm4 \n"
+ "paddd %%xmm0,%%xmm4 \n"
+ "paddd %%xmm5,%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x30,2) ",%%xmm5 \n"
+ "lea " MEMLEA(0x40,2) ",%2 \n"
+ "paddd %%xmm0,%%xmm5 \n"
+ "movdqu %%xmm2," MEMACCESS(1) " \n"
+ "movdqu %%xmm3," MEMACCESS2(0x10,1) " \n"
+ "movdqu %%xmm4," MEMACCESS2(0x20,1) " \n"
+ "movdqu %%xmm5," MEMACCESS2(0x30,1) " \n"
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "sub $0x4,%3 \n"
+ "jge 40b \n"
+
+ "49: \n"
+ "add $0x3,%3 \n"
+ "jl 19f \n"
+
+ // 1 pixel loop
+ LABELALIGN
+ "10: \n"
+ "movd " MEMACCESS(0) ",%%xmm2 \n"
+ "lea " MEMLEA(0x4,0) ",%0 \n"
+ "punpcklbw %%xmm1,%%xmm2 \n"
+ "punpcklwd %%xmm1,%%xmm2 \n"
+ "paddd %%xmm2,%%xmm0 \n"
+ "movdqu " MEMACCESS(2) ",%%xmm2 \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "paddd %%xmm0,%%xmm2 \n"
+ "movdqu %%xmm2," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x1,%3 \n"
+ "jge 10b \n"
+
+ "19: \n"
+ : "+r"(row), // %0
+ "+r"(cumsum), // %1
+ "+r"(previous_cumsum), // %2
+ "+r"(width) // %3
+ :
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_COMPUTECUMULATIVESUMROW_SSE2
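+
+// A scalar sketch (not compiled) of the row above: cumsum is one row of an
+// integral image, so each entry is this row's running per-channel sum plus
+// the entry directly above it in previous_cumsum.
+// ComputeCumulativeSumRow_Sketch is a hypothetical name.
+#if 0
+static void ComputeCumulativeSumRow_Sketch(const uint8* row, int32* cumsum,
+                                           const int32* previous_cumsum,
+                                           int width) {
+  int32 row_sum[4] = {0, 0, 0, 0};  // running B, G, R, A sums for this row.
+  int x, c;
+  for (x = 0; x < width; ++x) {
+    for (c = 0; c < 4; ++c) {
+      row_sum[c] += row[x * 4 + c];
+      cumsum[x * 4 + c] = row_sum[c] + previous_cumsum[x * 4 + c];
+    }
+  }
+}
+#endif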
+
+#ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
+void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
+ int width, int area, uint8* dst,
+ int count) {
+ asm volatile (
+ "movd %5,%%xmm5 \n"
+ "cvtdq2ps %%xmm5,%%xmm5 \n"
+ "rcpss %%xmm5,%%xmm4 \n"
+ "pshufd $0x0,%%xmm4,%%xmm4 \n"
+ "sub $0x4,%3 \n"
+ "jl 49f \n"
+ "cmpl $0x80,%5 \n"
+ "ja 40f \n"
+
+ "pshufd $0x0,%%xmm5,%%xmm5 \n"
+ "pcmpeqb %%xmm6,%%xmm6 \n"
+ "psrld $0x10,%%xmm6 \n"
+ "cvtdq2ps %%xmm6,%%xmm6 \n"
+ "addps %%xmm6,%%xmm5 \n"
+ "mulps %%xmm4,%%xmm5 \n"
+ "cvtps2dq %%xmm5,%%xmm5 \n"
+ "packssdw %%xmm5,%%xmm5 \n"
+
+ // 4 pixel small loop
+ LABELALIGN
+ "4: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ MEMOPREG(psubd,0x00,0,4,4,xmm0) // psubd 0x00(%0,%4,4),%%xmm0
+ MEMOPREG(psubd,0x10,0,4,4,xmm1) // psubd 0x10(%0,%4,4),%%xmm1
+ MEMOPREG(psubd,0x20,0,4,4,xmm2) // psubd 0x20(%0,%4,4),%%xmm2
+ MEMOPREG(psubd,0x30,0,4,4,xmm3) // psubd 0x30(%0,%4,4),%%xmm3
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "psubd " MEMACCESS(1) ",%%xmm0 \n"
+ "psubd " MEMACCESS2(0x10,1) ",%%xmm1 \n"
+ "psubd " MEMACCESS2(0x20,1) ",%%xmm2 \n"
+ "psubd " MEMACCESS2(0x30,1) ",%%xmm3 \n"
+ MEMOPREG(paddd,0x00,1,4,4,xmm0) // paddd 0x00(%1,%4,4),%%xmm0
+ MEMOPREG(paddd,0x10,1,4,4,xmm1) // paddd 0x10(%1,%4,4),%%xmm1
+ MEMOPREG(paddd,0x20,1,4,4,xmm2) // paddd 0x20(%1,%4,4),%%xmm2
+ MEMOPREG(paddd,0x30,1,4,4,xmm3) // paddd 0x30(%1,%4,4),%%xmm3
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "packssdw %%xmm1,%%xmm0 \n"
+ "packssdw %%xmm3,%%xmm2 \n"
+ "pmulhuw %%xmm5,%%xmm0 \n"
+ "pmulhuw %%xmm5,%%xmm2 \n"
+ "packuswb %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jge 4b \n"
+ "jmp 49f \n"
+
+ // 4 pixel loop
+ LABELALIGN
+ "40: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "movdqu " MEMACCESS2(0x20,0) ",%%xmm2 \n"
+ "movdqu " MEMACCESS2(0x30,0) ",%%xmm3 \n"
+ MEMOPREG(psubd,0x00,0,4,4,xmm0) // psubd 0x00(%0,%4,4),%%xmm0
+ MEMOPREG(psubd,0x10,0,4,4,xmm1) // psubd 0x10(%0,%4,4),%%xmm1
+ MEMOPREG(psubd,0x20,0,4,4,xmm2) // psubd 0x20(%0,%4,4),%%xmm2
+ MEMOPREG(psubd,0x30,0,4,4,xmm3) // psubd 0x30(%0,%4,4),%%xmm3
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "psubd " MEMACCESS(1) ",%%xmm0 \n"
+ "psubd " MEMACCESS2(0x10,1) ",%%xmm1 \n"
+ "psubd " MEMACCESS2(0x20,1) ",%%xmm2 \n"
+ "psubd " MEMACCESS2(0x30,1) ",%%xmm3 \n"
+ MEMOPREG(paddd,0x00,1,4,4,xmm0) // paddd 0x00(%1,%4,4),%%xmm0
+ MEMOPREG(paddd,0x10,1,4,4,xmm1) // paddd 0x10(%1,%4,4),%%xmm1
+ MEMOPREG(paddd,0x20,1,4,4,xmm2) // paddd 0x20(%1,%4,4),%%xmm2
+ MEMOPREG(paddd,0x30,1,4,4,xmm3) // paddd 0x30(%1,%4,4),%%xmm3
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "cvtdq2ps %%xmm0,%%xmm0 \n"
+ "cvtdq2ps %%xmm1,%%xmm1 \n"
+ "mulps %%xmm4,%%xmm0 \n"
+ "mulps %%xmm4,%%xmm1 \n"
+ "cvtdq2ps %%xmm2,%%xmm2 \n"
+ "cvtdq2ps %%xmm3,%%xmm3 \n"
+ "mulps %%xmm4,%%xmm2 \n"
+ "mulps %%xmm4,%%xmm3 \n"
+ "cvtps2dq %%xmm0,%%xmm0 \n"
+ "cvtps2dq %%xmm1,%%xmm1 \n"
+ "cvtps2dq %%xmm2,%%xmm2 \n"
+ "cvtps2dq %%xmm3,%%xmm3 \n"
+ "packssdw %%xmm1,%%xmm0 \n"
+ "packssdw %%xmm3,%%xmm2 \n"
+ "packuswb %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jge 40b \n"
+
+ "49: \n"
+ "add $0x3,%3 \n"
+ "jl 19f \n"
+
+ // 1 pixel loop
+ LABELALIGN
+ "10: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(psubd,0x00,0,4,4,xmm0) // psubd 0x00(%0,%4,4),%%xmm0
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "psubd " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPREG(paddd,0x00,1,4,4,xmm0) // paddd 0x00(%1,%4,4),%%xmm0
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "cvtdq2ps %%xmm0,%%xmm0 \n"
+ "mulps %%xmm4,%%xmm0 \n"
+ "cvtps2dq %%xmm0,%%xmm0 \n"
+ "packssdw %%xmm0,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movd %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x4,2) ",%2 \n"
+ "sub $0x1,%3 \n"
+ "jge 10b \n"
+ "19: \n"
+ : "+r"(topleft), // %0
+ "+r"(botleft), // %1
+ "+r"(dst), // %2
+ "+rm"(count) // %3
+ : "r"((intptr_t)(width)), // %4
+ "rm"(area) // %5
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+#endif // HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
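+
+// A scalar sketch (not compiled) of the averaging above: with an integral
+// image the sum over a box is tl - tr - bl + br, and the divide by area is
+// folded into a pmulhuw reciprocal for small areas or a float multiply
+// otherwise. CumulativeSumToAverageRow_Sketch is a hypothetical name; w is
+// assumed to be in int32 elements (four per pixel), matching the (%0,%4,4)
+// addressing above, and the float-to-byte rounding is simplified.
+#if 0
+static void CumulativeSumToAverageRow_Sketch(const int32* topleft,
+                                             const int32* botleft,
+                                             int w, int area, uint8* dst,
+                                             int count) {
+  float ooa = 1.0f / area;  // one over area.
+  int i, c;
+  for (i = 0; i < count; ++i) {
+    for (c = 0; c < 4; ++c) {
+      dst[c] = (uint8)((botleft[w + c] + topleft[c] - botleft[c] -
+                        topleft[w + c]) * ooa);
+    }
+    dst += 4;
+    topleft += 4;
+    botleft += 4;
+  }
+}
+#endif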
+
+#ifdef HAS_ARGBAFFINEROW_SSE2
+// Copy a row of ARGB pixels from the source image, stepping through it with
+// an affine slope (du/dv).
+LIBYUV_API
+void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
+ uint8* dst_argb, const float* src_dudv, int width) {
+ intptr_t src_argb_stride_temp = src_argb_stride;
+ intptr_t temp = 0;
+ asm volatile (
+ "movq " MEMACCESS(3) ",%%xmm2 \n"
+ "movq " MEMACCESS2(0x08,3) ",%%xmm7 \n"
+ "shl $0x10,%1 \n"
+ "add $0x4,%1 \n"
+ "movd %1,%%xmm5 \n"
+ "sub $0x4,%4 \n"
+ "jl 49f \n"
+
+ "pshufd $0x44,%%xmm7,%%xmm7 \n"
+ "pshufd $0x0,%%xmm5,%%xmm5 \n"
+ "movdqa %%xmm2,%%xmm0 \n"
+ "addps %%xmm7,%%xmm0 \n"
+ "movlhps %%xmm0,%%xmm2 \n"
+ "movdqa %%xmm7,%%xmm4 \n"
+ "addps %%xmm4,%%xmm4 \n"
+ "movdqa %%xmm2,%%xmm3 \n"
+ "addps %%xmm4,%%xmm3 \n"
+ "addps %%xmm4,%%xmm4 \n"
+
+ // 4 pixel loop
+ LABELALIGN
+ "40: \n"
+ "cvttps2dq %%xmm2,%%xmm0 \n" // x, y float to int first 2
+ "cvttps2dq %%xmm3,%%xmm1 \n" // x, y float to int next 2
+ "packssdw %%xmm1,%%xmm0 \n" // x, y as 8 shorts
+ "pmaddwd %%xmm5,%%xmm0 \n" // off = x * 4 + y * stride
+ "movd %%xmm0,%k1 \n"
+ "pshufd $0x39,%%xmm0,%%xmm0 \n"
+ "movd %%xmm0,%k5 \n"
+ "pshufd $0x39,%%xmm0,%%xmm0 \n"
+ MEMOPREG(movd,0x00,0,1,1,xmm1) // movd (%0,%1,1),%%xmm1
+ MEMOPREG(movd,0x00,0,5,1,xmm6) // movd (%0,%5,1),%%xmm6
+ "punpckldq %%xmm6,%%xmm1 \n"
+ "addps %%xmm4,%%xmm2 \n"
+ "movq %%xmm1," MEMACCESS(2) " \n"
+ "movd %%xmm0,%k1 \n"
+ "pshufd $0x39,%%xmm0,%%xmm0 \n"
+ "movd %%xmm0,%k5 \n"
+ MEMOPREG(movd,0x00,0,1,1,xmm0) // movd (%0,%1,1),%%xmm0
+ MEMOPREG(movd,0x00,0,5,1,xmm6) // movd (%0,%5,1),%%xmm6
+ "punpckldq %%xmm6,%%xmm0 \n"
+ "addps %%xmm4,%%xmm3 \n"
+ "movq %%xmm0," MEMACCESS2(0x08,2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%4 \n"
+ "jge 40b \n"
+
+ "49: \n"
+ "add $0x3,%4 \n"
+ "jl 19f \n"
+
+ // 1 pixel loop
+ LABELALIGN
+ "10: \n"
+ "cvttps2dq %%xmm2,%%xmm0 \n"
+ "packssdw %%xmm0,%%xmm0 \n"
+ "pmaddwd %%xmm5,%%xmm0 \n"
+ "addps %%xmm7,%%xmm2 \n"
+ "movd %%xmm0,%k1 \n"
+ MEMOPREG(movd,0x00,0,1,1,xmm0) // movd (%0,%1,1),%%xmm0
+ "movd %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x04,2) ",%2 \n"
+ "sub $0x1,%4 \n"
+ "jge 10b \n"
+ "19: \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_argb_stride_temp), // %1
+ "+r"(dst_argb), // %2
+ "+r"(src_dudv), // %3
+ "+rm"(width), // %4
+ "+r"(temp) // %5
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBAFFINEROW_SSE2
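+
+// A scalar sketch (not compiled) of the affine walk above: src_dudv holds
+// {x, y, du, dv} as floats, each output pixel is fetched from the truncated
+// (x, y), and the pmaddwd above computes the same x * 4 + y * stride offset
+// four pixels at a time. ARGBAffineRow_Sketch is a hypothetical name.
+#if 0
+static void ARGBAffineRow_Sketch(const uint8* src_argb, int src_argb_stride,
+                                 uint8* dst_argb, const float* src_dudv,
+                                 int width) {
+  float u = src_dudv[0];  // source x
+  float v = src_dudv[1];  // source y
+  int i;
+  for (i = 0; i < width; ++i) {
+    int x = (int)u;  // truncate toward zero, like cvttps2dq.
+    int y = (int)v;
+    const uint8* src = src_argb + x * 4 + y * src_argb_stride;
+    dst_argb[0] = src[0];
+    dst_argb[1] = src[1];
+    dst_argb[2] = src[2];
+    dst_argb[3] = src[3];
+    dst_argb += 4;
+    u += src_dudv[2];  // du
+    v += src_dudv[3];  // dv
+  }
+}
+#endif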
+
+#ifdef HAS_INTERPOLATEROW_SSSE3
+// Bilinear filter 16x2 -> 16x1
+void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) {
+ asm volatile (
+ "sub %1,%0 \n"
+ "shr %3 \n"
+ "cmp $0x0,%3 \n"
+ "je 100f \n"
+ "cmp $0x20,%3 \n"
+ "je 75f \n"
+ "cmp $0x40,%3 \n"
+ "je 50f \n"
+ "cmp $0x60,%3 \n"
+ "je 25f \n"
+
+ "movd %3,%%xmm0 \n"
+ "neg %3 \n"
+ "add $0x80,%3 \n"
+ "movd %3,%%xmm5 \n"
+ "punpcklbw %%xmm0,%%xmm5 \n"
+ "punpcklwd %%xmm5,%%xmm5 \n"
+ "pshufd $0x0,%%xmm5,%%xmm5 \n"
+
+ // General purpose row blend.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,1,4,1,xmm2)
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm2,%%xmm0 \n"
+ "punpckhbw %%xmm2,%%xmm1 \n"
+ "pmaddubsw %%xmm5,%%xmm0 \n"
+ "pmaddubsw %%xmm5,%%xmm1 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "psrlw $0x7,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ "jmp 99f \n"
+
+ // Blend 25 / 75.
+ LABELALIGN
+ "25: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,1,4,1,xmm1)
+ "pavgb %%xmm1,%%xmm0 \n"
+ "pavgb %%xmm1,%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 25b \n"
+ "jmp 99f \n"
+
+ // Blend 50 / 50.
+ LABELALIGN
+ "50: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,1,4,1,xmm1)
+ "pavgb %%xmm1,%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 50b \n"
+ "jmp 99f \n"
+
+ // Blend 75 / 25.
+ LABELALIGN
+ "75: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,1,4,1,xmm0)
+ "pavgb %%xmm1,%%xmm0 \n"
+ "pavgb %%xmm1,%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 75b \n"
+ "jmp 99f \n"
+
+ // Blend 100 / 0 - Copy row unchanged.
+ LABELALIGN
+ "100: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 100b \n"
+
+ "99: \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(source_y_fraction) // %3
+ : "r"((intptr_t)(src_stride)) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm5"
+ );
+}
+#endif // HAS_INTERPOLATEROW_SSSE3
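+
+// A scalar sketch (not compiled) of the blend above: the fraction is halved
+// to 0..128 (the shr), the 0/25/50/75/100% cases take pavgb shortcuts, and
+// the general case is the fixed-point weighted average that pmaddubsw
+// computes with the interleaved (128 - f, f) weights in xmm5.
+// InterpolateRow_Sketch is a hypothetical name.
+#if 0
+static void InterpolateRow_Sketch(uint8* dst_ptr, const uint8* src_ptr,
+                                  ptrdiff_t src_stride, int dst_width,
+                                  int source_y_fraction) {
+  int f = source_y_fraction >> 1;  // 0..128
+  const uint8* src_ptr1 = src_ptr + src_stride;
+  int x;
+  for (x = 0; x < dst_width; ++x) {
+    dst_ptr[x] = (uint8)((src_ptr[x] * (128 - f) + src_ptr1[x] * f) >> 7);
+  }
+}
+#endif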
+
+#ifdef HAS_INTERPOLATEROW_AVX2
+// Bilinear filter 32x2 -> 32x1
+void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) {
+ asm volatile (
+ "shr %3 \n"
+ "cmp $0x0,%3 \n"
+ "je 100f \n"
+ "sub %1,%0 \n"
+ "cmp $0x20,%3 \n"
+ "je 75f \n"
+ "cmp $0x40,%3 \n"
+ "je 50f \n"
+ "cmp $0x60,%3 \n"
+ "je 25f \n"
+
+ "vmovd %3,%%xmm0 \n"
+ "neg %3 \n"
+ "add $0x80,%3 \n"
+ "vmovd %3,%%xmm5 \n"
+ "vpunpcklbw %%xmm0,%%xmm5,%%xmm5 \n"
+ "vpunpcklwd %%xmm5,%%xmm5,%%xmm5 \n"
+ "vpxor %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpermd %%ymm5,%%ymm0,%%ymm5 \n"
+
+ // General purpose row blend.
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(1) ",%%ymm0 \n"
+ MEMOPREG(vmovdqu,0x00,1,4,1,ymm2)
+ "vpunpckhbw %%ymm2,%%ymm0,%%ymm1 \n"
+ "vpunpcklbw %%ymm2,%%ymm0,%%ymm0 \n"
+ "vpmaddubsw %%ymm5,%%ymm0,%%ymm0 \n"
+ "vpmaddubsw %%ymm5,%%ymm1,%%ymm1 \n"
+ "vpsrlw $0x7,%%ymm0,%%ymm0 \n"
+ "vpsrlw $0x7,%%ymm1,%%ymm1 \n"
+ "vpackuswb %%ymm1,%%ymm0,%%ymm0 \n"
+ MEMOPMEM(vmovdqu,ymm0,0x00,1,0,1)
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 1b \n"
+ "jmp 99f \n"
+
+ // Blend 25 / 75.
+ LABELALIGN
+ "25: \n"
+ "vmovdqu " MEMACCESS(1) ",%%ymm0 \n"
+ MEMOPREG(vmovdqu,0x00,1,4,1,ymm1)
+ "vpavgb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpavgb %%ymm1,%%ymm0,%%ymm0 \n"
+ MEMOPMEM(vmovdqu,ymm0,0x00,1,0,1)
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 25b \n"
+ "jmp 99f \n"
+
+ // Blend 50 / 50.
+ LABELALIGN
+ "50: \n"
+ "vmovdqu " MEMACCESS(1) ",%%ymm0 \n"
+ VMEMOPREG(vpavgb,0x00,1,4,1,ymm0,ymm0) // vpavgb (%1,%4,1),%%ymm0,%%ymm0
+ MEMOPMEM(vmovdqu,ymm0,0x00,1,0,1)
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 50b \n"
+ "jmp 99f \n"
+
+ // Blend 75 / 25.
+ LABELALIGN
+ "75: \n"
+ "vmovdqu " MEMACCESS(1) ",%%ymm1 \n"
+ MEMOPREG(vmovdqu,0x00,1,4,1,ymm0)
+ "vpavgb %%ymm1,%%ymm0,%%ymm0 \n"
+ "vpavgb %%ymm1,%%ymm0,%%ymm0 \n"
+ MEMOPMEM(vmovdqu,ymm0,0x00,1,0,1)
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x20,%2 \n"
+ "jg 75b \n"
+ "jmp 99f \n"
+
+ // Blend 100 / 0 - Copy row unchanged.
+ LABELALIGN
+ "100: \n"
+ "rep movsb " MEMMOVESTRING(1,0) " \n"
+ "jmp 999f \n"
+
+ "99: \n"
+ "vzeroupper \n"
+ "999: \n"
+ : "+D"(dst_ptr), // %0
+ "+S"(src_ptr), // %1
+ "+c"(dst_width), // %2
+ "+r"(source_y_fraction) // %3
+ : "r"((intptr_t)(src_stride)) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm5"
+ );
+}
+#endif // HAS_INTERPOLATEROW_AVX2
+
+#ifdef HAS_INTERPOLATEROW_SSE2
+// Bilinear filter 16x2 -> 16x1
+void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) {
+ asm volatile (
+ "sub %1,%0 \n"
+ "shr %3 \n"
+ "cmp $0x0,%3 \n"
+ "je 100f \n"
+ "cmp $0x20,%3 \n"
+ "je 75f \n"
+ "cmp $0x40,%3 \n"
+ "je 50f \n"
+ "cmp $0x60,%3 \n"
+ "je 25f \n"
+
+ "movd %3,%%xmm0 \n"
+ "neg %3 \n"
+ "add $0x80,%3 \n"
+ "movd %3,%%xmm5 \n"
+ "punpcklbw %%xmm0,%%xmm5 \n"
+ "punpcklwd %%xmm5,%%xmm5 \n"
+ "pshufd $0x0,%%xmm5,%%xmm5 \n"
+ "pxor %%xmm4,%%xmm4 \n"
+
+ // General purpose row blend.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,1,4,1,xmm2) // movdqu (%1,%4,1),%%xmm2
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm2,%%xmm3 \n"
+ "punpcklbw %%xmm4,%%xmm2 \n"
+ "punpckhbw %%xmm4,%%xmm3 \n"
+ "punpcklbw %%xmm4,%%xmm0 \n"
+ "punpckhbw %%xmm4,%%xmm1 \n"
+ "psubw %%xmm0,%%xmm2 \n"
+ "psubw %%xmm1,%%xmm3 \n"
+ "paddw %%xmm2,%%xmm2 \n"
+ "paddw %%xmm3,%%xmm3 \n"
+ "pmulhw %%xmm5,%%xmm2 \n"
+ "pmulhw %%xmm5,%%xmm3 \n"
+ "paddw %%xmm2,%%xmm0 \n"
+ "paddw %%xmm3,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ "jmp 99f \n"
+
+ // Blend 25 / 75.
+ LABELALIGN
+ "25: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,1,4,1,xmm1) // movdqu (%1,%4,1),%%xmm1
+ "pavgb %%xmm1,%%xmm0 \n"
+ "pavgb %%xmm1,%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 25b \n"
+ "jmp 99f \n"
+
+ // Blend 50 / 50.
+ LABELALIGN
+ "50: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,1,4,1,xmm1) // movdqu (%1,%4,1),%%xmm1
+ "pavgb %%xmm1,%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 50b \n"
+ "jmp 99f \n"
+
+ // Blend 75 / 25.
+ LABELALIGN
+ "75: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,1,4,1,xmm0) // movdqu (%1,%4,1),%%xmm0
+ "pavgb %%xmm1,%%xmm0 \n"
+ "pavgb %%xmm1,%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 75b \n"
+ "jmp 99f \n"
+
+ // Blend 100 / 0 - Copy row unchanged.
+ LABELALIGN
+ "100: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ MEMOPMEM(movdqu,xmm0,0x00,1,0,1) // movdqu %%xmm0,(%1,%0,1)
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 100b \n"
+
+ "99: \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(source_y_fraction) // %3
+ : "r"((intptr_t)(src_stride)) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_INTERPOLATEROW_SSE2
+
+#ifdef HAS_ARGBSHUFFLEROW_SSSE3
+// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
+void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ asm volatile (
+ "movdqu " MEMACCESS(3) ",%%xmm5 \n"
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pshufb %%xmm5,%%xmm0 \n"
+ "pshufb %%xmm5,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ : "r"(shuffler) // %3
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm5"
+ );
+}
+#endif // HAS_ARGBSHUFFLEROW_SSSE3
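+
+// A scalar sketch (not compiled) of the shuffle above: each output byte
+// selects one of the four input bytes of the same pixel, which pshufb does
+// 16 bytes at a time with the mask loaded into xmm5. This assumes the first
+// four shuffler entries index within one pixel (values 0..3), as the channel
+// reorders listed above all do. ARGBShuffleRow_Sketch is a hypothetical
+// name.
+#if 0
+static void ARGBShuffleRow_Sketch(const uint8* src_argb, uint8* dst_argb,
+                                  const uint8* shuffler, int pix) {
+  int x;
+  for (x = 0; x < pix; ++x) {
+    dst_argb[0] = src_argb[shuffler[0]];
+    dst_argb[1] = src_argb[shuffler[1]];
+    dst_argb[2] = src_argb[shuffler[2]];
+    dst_argb[3] = src_argb[shuffler[3]];
+    src_argb += 4;
+    dst_argb += 4;
+  }
+}
+#endif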
+
+#ifdef HAS_ARGBSHUFFLEROW_AVX2
+// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
+void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ asm volatile (
+ "vbroadcastf128 " MEMACCESS(3) ",%%ymm5 \n"
+ LABELALIGN
+ "1: \n"
+ "vmovdqu " MEMACCESS(0) ",%%ymm0 \n"
+ "vmovdqu " MEMACCESS2(0x20,0) ",%%ymm1 \n"
+ "lea " MEMLEA(0x40,0) ",%0 \n"
+ "vpshufb %%ymm5,%%ymm0,%%ymm0 \n"
+ "vpshufb %%ymm5,%%ymm1,%%ymm1 \n"
+ "vmovdqu %%ymm0," MEMACCESS(1) " \n"
+ "vmovdqu %%ymm1," MEMACCESS2(0x20,1) " \n"
+ "lea " MEMLEA(0x40,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ : "r"(shuffler) // %3
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm5"
+ );
+}
+#endif // HAS_ARGBSHUFFLEROW_AVX2
+
+#ifdef HAS_ARGBSHUFFLEROW_SSE2
+// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
+void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ uintptr_t pixel_temp = 0u;
+ asm volatile (
+ "pxor %%xmm5,%%xmm5 \n"
+ "mov " MEMACCESS(4) ",%k2 \n"
+ "cmp $0x3000102,%k2 \n"
+ "je 3012f \n"
+ "cmp $0x10203,%k2 \n"
+ "je 123f \n"
+ "cmp $0x30201,%k2 \n"
+ "je 321f \n"
+ "cmp $0x2010003,%k2 \n"
+ "je 2103f \n"
+
+ LABELALIGN
+ "1: \n"
+ "movzb " MEMACCESS(4) ",%2 \n"
+ MEMOPARG(movzb,0x00,0,2,1,2) " \n" // movzb (%0,%2,1),%2
+ "mov %b2," MEMACCESS(1) " \n"
+ "movzb " MEMACCESS2(0x1,4) ",%2 \n"
+ MEMOPARG(movzb,0x00,0,2,1,2) " \n" // movzb (%0,%2,1),%2
+ "mov %b2," MEMACCESS2(0x1,1) " \n"
+ "movzb " MEMACCESS2(0x2,4) ",%2 \n"
+ MEMOPARG(movzb,0x00,0,2,1,2) " \n" // movzb (%0,%2,1),%2
+ "mov %b2," MEMACCESS2(0x2,1) " \n"
+ "movzb " MEMACCESS2(0x3,4) ",%2 \n"
+ MEMOPARG(movzb,0x00,0,2,1,2) " \n" // movzb (%0,%2,1),%2
+ "mov %b2," MEMACCESS2(0x3,1) " \n"
+ "lea " MEMLEA(0x4,0) ",%0 \n"
+ "lea " MEMLEA(0x4,1) ",%1 \n"
+ "sub $0x1,%3 \n"
+ "jg 1b \n"
+ "jmp 99f \n"
+
+ LABELALIGN
+ "123: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "punpckhbw %%xmm5,%%xmm1 \n"
+ "pshufhw $0x1b,%%xmm0,%%xmm0 \n"
+ "pshuflw $0x1b,%%xmm0,%%xmm0 \n"
+ "pshufhw $0x1b,%%xmm1,%%xmm1 \n"
+ "pshuflw $0x1b,%%xmm1,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%3 \n"
+ "jg 123b \n"
+ "jmp 99f \n"
+
+ LABELALIGN
+ "321: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "punpckhbw %%xmm5,%%xmm1 \n"
+ "pshufhw $0x39,%%xmm0,%%xmm0 \n"
+ "pshuflw $0x39,%%xmm0,%%xmm0 \n"
+ "pshufhw $0x39,%%xmm1,%%xmm1 \n"
+ "pshuflw $0x39,%%xmm1,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%3 \n"
+ "jg 321b \n"
+ "jmp 99f \n"
+
+ LABELALIGN
+ "2103: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "punpckhbw %%xmm5,%%xmm1 \n"
+ "pshufhw $0x93,%%xmm0,%%xmm0 \n"
+ "pshuflw $0x93,%%xmm0,%%xmm0 \n"
+ "pshufhw $0x93,%%xmm1,%%xmm1 \n"
+ "pshuflw $0x93,%%xmm1,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%3 \n"
+ "jg 2103b \n"
+ "jmp 99f \n"
+
+ LABELALIGN
+ "3012: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "punpckhbw %%xmm5,%%xmm1 \n"
+ "pshufhw $0xc6,%%xmm0,%%xmm0 \n"
+ "pshuflw $0xc6,%%xmm0,%%xmm0 \n"
+ "pshufhw $0xc6,%%xmm1,%%xmm1 \n"
+ "pshuflw $0xc6,%%xmm1,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%3 \n"
+ "jg 3012b \n"
+
+ "99: \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+d"(pixel_temp), // %2
+ "+r"(pix) // %3
+ : "r"(shuffler) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm5"
+ );
+}
+#endif // HAS_ARGBSHUFFLEROW_SSE2
+
+#ifdef HAS_I422TOYUY2ROW_SSE2
+void I422ToYUY2Row_SSE2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_frame, int width) {
+ asm volatile (
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movq " MEMACCESS(1) ",%%xmm2 \n"
+ MEMOPREG(movq,0x00,1,2,1,xmm3) // movq (%1,%2,1),%%xmm3
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "punpcklbw %%xmm3,%%xmm2 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm2,%%xmm0 \n"
+ "punpckhbw %%xmm2,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(3) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,3) " \n"
+ "lea " MEMLEA(0x20,3) ",%3 \n"
+ "sub $0x10,%4 \n"
+ "jg 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_frame), // %3
+ "+rm"(width) // %4
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3"
+ );
+}
+#endif // HAS_I422TOYUY2ROW_SSE2
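+
+// A scalar sketch (not compiled) of the packing above: YUY2 interleaves two
+// Y samples with one shared U and one shared V per pair (Y0 U Y1 V), which
+// the punpcklbw sequence builds 16 Y samples at a time. I422ToYUY2Row_Sketch
+// is a hypothetical name; width is assumed even.
+#if 0
+static void I422ToYUY2Row_Sketch(const uint8* src_y, const uint8* src_u,
+                                 const uint8* src_v, uint8* dst_frame,
+                                 int width) {
+  int x;
+  for (x = 0; x < width; x += 2) {
+    dst_frame[0] = src_y[0];  // Y0
+    dst_frame[1] = src_u[0];  // U, shared by the pair.
+    dst_frame[2] = src_y[1];  // Y1
+    dst_frame[3] = src_v[0];  // V, shared by the pair.
+    dst_frame += 4;
+    src_y += 2;
+    ++src_u;
+    ++src_v;
+  }
+}
+#endif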
+
+#ifdef HAS_I422TOUYVYROW_SSE2
+void I422ToUYVYRow_SSE2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_frame, int width) {
+ asm volatile (
+ "sub %1,%2 \n"
+ LABELALIGN
+ "1: \n"
+ "movq " MEMACCESS(1) ",%%xmm2 \n"
+ MEMOPREG(movq,0x00,1,2,1,xmm3) // movq (%1,%2,1),%%xmm3
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "punpcklbw %%xmm3,%%xmm2 \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqa %%xmm2,%%xmm1 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "punpcklbw %%xmm0,%%xmm1 \n"
+ "punpckhbw %%xmm0,%%xmm2 \n"
+ "movdqu %%xmm1," MEMACCESS(3) " \n"
+ "movdqu %%xmm2," MEMACCESS2(0x10,3) " \n"
+ "lea " MEMLEA(0x20,3) ",%3 \n"
+ "sub $0x10,%4 \n"
+ "jg 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_frame), // %3
+ "+rm"(width) // %4
+ :
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3"
+ );
+}
+#endif // HAS_I422TOUYVYROW_SSE2
+
+#ifdef HAS_ARGBPOLYNOMIALROW_SSE2
+void ARGBPolynomialRow_SSE2(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width) {
+ asm volatile (
+ "pxor %%xmm3,%%xmm3 \n"
+
+ // 2 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movq " MEMACCESS(0) ",%%xmm0 \n"
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "punpcklbw %%xmm3,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm4 \n"
+ "punpcklwd %%xmm3,%%xmm0 \n"
+ "punpckhwd %%xmm3,%%xmm4 \n"
+ "cvtdq2ps %%xmm0,%%xmm0 \n"
+ "cvtdq2ps %%xmm4,%%xmm4 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "movdqa %%xmm4,%%xmm5 \n"
+ "mulps " MEMACCESS2(0x10,3) ",%%xmm0 \n"
+ "mulps " MEMACCESS2(0x10,3) ",%%xmm4 \n"
+ "addps " MEMACCESS(3) ",%%xmm0 \n"
+ "addps " MEMACCESS(3) ",%%xmm4 \n"
+ "movdqa %%xmm1,%%xmm2 \n"
+ "movdqa %%xmm5,%%xmm6 \n"
+ "mulps %%xmm1,%%xmm2 \n"
+ "mulps %%xmm5,%%xmm6 \n"
+ "mulps %%xmm2,%%xmm1 \n"
+ "mulps %%xmm6,%%xmm5 \n"
+ "mulps " MEMACCESS2(0x20,3) ",%%xmm2 \n"
+ "mulps " MEMACCESS2(0x20,3) ",%%xmm6 \n"
+ "mulps " MEMACCESS2(0x30,3) ",%%xmm1 \n"
+ "mulps " MEMACCESS2(0x30,3) ",%%xmm5 \n"
+ "addps %%xmm2,%%xmm0 \n"
+ "addps %%xmm6,%%xmm4 \n"
+ "addps %%xmm1,%%xmm0 \n"
+ "addps %%xmm5,%%xmm4 \n"
+ "cvttps2dq %%xmm0,%%xmm0 \n"
+ "cvttps2dq %%xmm4,%%xmm4 \n"
+ "packuswb %%xmm4,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x2,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "r"(poly) // %3
+ : "memory", "cc"
+ , "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+#endif // HAS_ARGBPOLYNOMIALROW_SSE2
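+
+// A scalar sketch (not compiled) of the polynomial above: each channel is
+// mapped through result = C0 + C1*x + C2*x^2 + C3*x^3, with poly laid out as
+// four coefficients per term (C0[4], C1[4], C2[4], C3[4]) so one 16-float
+// block covers B, G, R and A. ARGBPolynomialRow_Sketch is a hypothetical
+// name.
+#if 0
+static void ARGBPolynomialRow_Sketch(const uint8* src_argb, uint8* dst_argb,
+                                     const float* poly, int width) {
+  int i;
+  for (i = 0; i < width * 4; ++i) {
+    int c = i & 3;  // channel within the pixel.
+    float x = (float)src_argb[i];
+    float r = poly[c] + poly[4 + c] * x + poly[8 + c] * x * x +
+              poly[12 + c] * x * x * x;
+    dst_argb[i] = (uint8)(r < 0.f ? 0.f : (r > 255.f ? 255.f : r));
+  }
+}
+#endif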
+
+#ifdef HAS_ARGBPOLYNOMIALROW_AVX2
+void ARGBPolynomialRow_AVX2(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width) {
+ asm volatile (
+ "vbroadcastf128 " MEMACCESS(3) ",%%ymm4 \n"
+ "vbroadcastf128 " MEMACCESS2(0x10,3) ",%%ymm5 \n"
+ "vbroadcastf128 " MEMACCESS2(0x20,3) ",%%ymm6 \n"
+ "vbroadcastf128 " MEMACCESS2(0x30,3) ",%%ymm7 \n"
+
+ // 2 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "vpmovzxbd " MEMACCESS(0) ",%%ymm0 \n" // 2 ARGB pixels
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "vcvtdq2ps %%ymm0,%%ymm0 \n" // X 8 floats
+ "vmulps %%ymm0,%%ymm0,%%ymm2 \n" // X * X
+ "vmulps %%ymm7,%%ymm0,%%ymm3 \n" // C3 * X
+ "vfmadd132ps %%ymm5,%%ymm4,%%ymm0 \n" // result = C0 + C1 * X
+ "vfmadd231ps %%ymm6,%%ymm2,%%ymm0 \n" // result += C2 * X * X
+ "vfmadd231ps %%ymm3,%%ymm2,%%ymm0 \n" // result += C3 * X * X * X
+ "vcvttps2dq %%ymm0,%%ymm0 \n"
+ "vpackusdw %%ymm0,%%ymm0,%%ymm0 \n"
+ "vpermq $0xd8,%%ymm0,%%ymm0 \n"
+ "vpackuswb %%xmm0,%%xmm0,%%xmm0 \n"
+ "vmovq %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x2,%2 \n"
+ "jg 1b \n"
+ "vzeroupper \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "r"(poly) // %3
+ : "memory", "cc",
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+#endif // HAS_ARGBPOLYNOMIALROW_AVX2
+
+#ifdef HAS_ARGBCOLORTABLEROW_X86
+// Transform ARGB pixels with color table.
+void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
+ int width) {
+ uintptr_t pixel_temp = 0u;
+ asm volatile (
+ // 1 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movzb " MEMACCESS(0) ",%1 \n"
+ "lea " MEMLEA(0x4,0) ",%0 \n"
+ MEMOPARG(movzb,0x00,3,1,4,1) " \n" // movzb (%3,%1,4),%1
+ "mov %b1," MEMACCESS2(-0x4,0) " \n"
+ "movzb " MEMACCESS2(-0x3,0) ",%1 \n"
+ MEMOPARG(movzb,0x01,3,1,4,1) " \n" // movzb 0x1(%3,%1,4),%1
+ "mov %b1," MEMACCESS2(-0x3,0) " \n"
+ "movzb " MEMACCESS2(-0x2,0) ",%1 \n"
+ MEMOPARG(movzb,0x02,3,1,4,1) " \n" // movzb 0x2(%3,%1,4),%1
+ "mov %b1," MEMACCESS2(-0x2,0) " \n"
+ "movzb " MEMACCESS2(-0x1,0) ",%1 \n"
+ MEMOPARG(movzb,0x03,3,1,4,1) " \n" // movzb 0x3(%3,%1,4),%1
+ "mov %b1," MEMACCESS2(-0x1,0) " \n"
+ "dec %2 \n"
+ "jg 1b \n"
+ : "+r"(dst_argb), // %0
+ "+d"(pixel_temp), // %1
+ "+r"(width) // %2
+ : "r"(table_argb) // %3
+ : "memory", "cc");
+}
+#endif // HAS_ARGBCOLORTABLEROW_X86
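+
+// A scalar sketch (not compiled) of the lookup above: the table interleaves
+// four 256-entry channel tables, so channel c of value v is replaced by
+// table_argb[v * 4 + c], exactly the movzb (%3,%1,4) addressing plus the
+// channel offset. The RGB variant below is the same without the alpha byte.
+// ARGBColorTableRow_Sketch is a hypothetical name.
+#if 0
+static void ARGBColorTableRow_Sketch(uint8* dst_argb,
+                                     const uint8* table_argb, int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst_argb[0] = table_argb[dst_argb[0] * 4 + 0];  // B
+    dst_argb[1] = table_argb[dst_argb[1] * 4 + 1];  // G
+    dst_argb[2] = table_argb[dst_argb[2] * 4 + 2];  // R
+    dst_argb[3] = table_argb[dst_argb[3] * 4 + 3];  // A
+    dst_argb += 4;
+  }
+}
+#endif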
+
+#ifdef HAS_RGBCOLORTABLEROW_X86
+// Transform RGB pixels with color table.
+void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) {
+ uintptr_t pixel_temp = 0u;
+ asm volatile (
+ // 1 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movzb " MEMACCESS(0) ",%1 \n"
+ "lea " MEMLEA(0x4,0) ",%0 \n"
+ MEMOPARG(movzb,0x00,3,1,4,1) " \n" // movzb (%3,%1,4),%1
+ "mov %b1," MEMACCESS2(-0x4,0) " \n"
+ "movzb " MEMACCESS2(-0x3,0) ",%1 \n"
+ MEMOPARG(movzb,0x01,3,1,4,1) " \n" // movzb 0x1(%3,%1,4),%1
+ "mov %b1," MEMACCESS2(-0x3,0) " \n"
+ "movzb " MEMACCESS2(-0x2,0) ",%1 \n"
+ MEMOPARG(movzb,0x02,3,1,4,1) " \n" // movzb 0x2(%3,%1,4),%1
+ "mov %b1," MEMACCESS2(-0x2,0) " \n"
+ "dec %2 \n"
+ "jg 1b \n"
+ : "+r"(dst_argb), // %0
+ "+d"(pixel_temp), // %1
+ "+r"(width) // %2
+ : "r"(table_argb) // %3
+ : "memory", "cc");
+}
+#endif // HAS_RGBCOLORTABLEROW_X86
+
+#ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
+// Transform RGB pixels with luma table; alpha is copied unchanged.
+void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ int width,
+ const uint8* luma, uint32 lumacoeff) {
+ uintptr_t pixel_temp = 0u;
+ uintptr_t table_temp = 0u;
+ asm volatile (
+ "movd %6,%%xmm3 \n"
+ "pshufd $0x0,%%xmm3,%%xmm3 \n"
+ "pcmpeqb %%xmm4,%%xmm4 \n"
+ "psllw $0x8,%%xmm4 \n"
+ "pxor %%xmm5,%%xmm5 \n"
+
+ // 4 pixel loop.
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(2) ",%%xmm0 \n"
+ "pmaddubsw %%xmm3,%%xmm0 \n"
+ "phaddw %%xmm0,%%xmm0 \n"
+ "pand %%xmm4,%%xmm0 \n"
+ "punpcklwd %%xmm5,%%xmm0 \n"
+ "movd %%xmm0,%k1 \n" // 32 bit offset
+ "add %5,%1 \n"
+ "pshufd $0x39,%%xmm0,%%xmm0 \n"
+
+ "movzb " MEMACCESS(2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS(3) " \n"
+ "movzb " MEMACCESS2(0x1,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0x1,3) " \n"
+ "movzb " MEMACCESS2(0x2,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0x2,3) " \n"
+ "movzb " MEMACCESS2(0x3,2) ",%0 \n"
+ "mov %b0," MEMACCESS2(0x3,3) " \n"
+
+ "movd %%xmm0,%k1 \n" // 32 bit offset
+ "add %5,%1 \n"
+ "pshufd $0x39,%%xmm0,%%xmm0 \n"
+
+ "movzb " MEMACCESS2(0x4,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0x4,3) " \n"
+ "movzb " MEMACCESS2(0x5,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0x5,3) " \n"
+ "movzb " MEMACCESS2(0x6,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0x6,3) " \n"
+ "movzb " MEMACCESS2(0x7,2) ",%0 \n"
+ "mov %b0," MEMACCESS2(0x7,3) " \n"
+
+ "movd %%xmm0,%k1 \n" // 32 bit offset
+ "add %5,%1 \n"
+ "pshufd $0x39,%%xmm0,%%xmm0 \n"
+
+ "movzb " MEMACCESS2(0x8,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0x8,3) " \n"
+ "movzb " MEMACCESS2(0x9,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0x9,3) " \n"
+ "movzb " MEMACCESS2(0xa,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0xa,3) " \n"
+ "movzb " MEMACCESS2(0xb,2) ",%0 \n"
+ "mov %b0," MEMACCESS2(0xb,3) " \n"
+
+ "movd %%xmm0,%k1 \n" // 32 bit offset
+ "add %5,%1 \n"
+
+ "movzb " MEMACCESS2(0xc,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0xc,3) " \n"
+ "movzb " MEMACCESS2(0xd,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0xd,3) " \n"
+ "movzb " MEMACCESS2(0xe,2) ",%0 \n"
+ MEMOPARG(movzb,0x00,1,0,1,0) " \n" // movzb (%1,%0,1),%0
+ "mov %b0," MEMACCESS2(0xe,3) " \n"
+ "movzb " MEMACCESS2(0xf,2) ",%0 \n"
+ "mov %b0," MEMACCESS2(0xf,3) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "lea " MEMLEA(0x10,3) ",%3 \n"
+ "sub $0x4,%4 \n"
+ "jg 1b \n"
+ : "+d"(pixel_temp), // %0
+ "+a"(table_temp), // %1
+ "+r"(src_argb), // %2
+ "+r"(dst_argb), // %3
+ "+rm"(width) // %4
+ : "r"(luma), // %5
+ "rm"(lumacoeff) // %6
+ : "memory", "cc", "xmm0", "xmm3", "xmm4", "xmm5"
+ );
+}
+#endif // HAS_ARGBLUMACOLORTABLEROW_SSSE3
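+
+// A scalar sketch (not compiled) of the lookup above: pmaddubsw/phaddw form
+// a weighted luma per pixel from lumacoeff's packed byte weights, the 0xff00
+// mask truncates it to a multiple of 256 that selects a 256-byte stripe of
+// luma, and B, G, R are looked up in that stripe while alpha is copied.
+// ARGBLumaColorTableRow_Sketch is a hypothetical name and the coefficient
+// unpacking is an assumption about lumacoeff's byte order.
+#if 0
+static void ARGBLumaColorTableRow_Sketch(const uint8* src_argb,
+                                         uint8* dst_argb, int width,
+                                         const uint8* luma,
+                                         uint32 lumacoeff) {
+  const uint32 bc = lumacoeff & 0xff;
+  const uint32 gc = (lumacoeff >> 8) & 0xff;
+  const uint32 rc = (lumacoeff >> 16) & 0xff;
+  int i;
+  for (i = 0; i < width; ++i) {
+    const uint8* luma0 = luma + ((src_argb[0] * bc + src_argb[1] * gc +
+                                  src_argb[2] * rc) & 0xff00);
+    dst_argb[0] = luma0[src_argb[0]];
+    dst_argb[1] = luma0[src_argb[1]];
+    dst_argb[2] = luma0[src_argb[2]];
+    dst_argb[3] = src_argb[3];  // alpha copied unchanged.
+    src_argb += 4;
+    dst_argb += 4;
+  }
+}
+#endif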
+
+#endif // defined(__x86_64__) || defined(__i386__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/row_mips.cc b/media/libaom/src/third_party/libyuv/source/row_mips.cc
new file mode 100644
index 000000000..cfc9ffe03
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/row_mips.cc
@@ -0,0 +1,911 @@
+/*
+ * Copyright (c) 2012 The LibYuv project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// The following are available on MIPS platforms:
+#if !defined(LIBYUV_DISABLE_MIPS) && defined(__mips__) && \
+ (_MIPS_SIM == _MIPS_SIM_ABI32)
+
+#ifdef HAS_COPYROW_MIPS
+void CopyRow_MIPS(const uint8* src, uint8* dst, int count) {
+ __asm__ __volatile__ (
+ ".set noreorder \n"
+ ".set noat \n"
+ "slti $at, %[count], 8 \n"
+ "bne $at ,$zero, $last8 \n"
+ "xor $t8, %[src], %[dst] \n"
+ "andi $t8, $t8, 0x3 \n"
+
+ "bne $t8, $zero, unaligned \n"
+ "negu $a3, %[dst] \n"
+ // make dst/src aligned
+ "andi $a3, $a3, 0x3 \n"
+ "beq $a3, $zero, $chk16w \n"
+ // word-aligned; now count is the remaining byte count
+ "subu %[count], %[count], $a3 \n"
+
+ "lwr $t8, 0(%[src]) \n"
+ "addu %[src], %[src], $a3 \n"
+ "swr $t8, 0(%[dst]) \n"
+ "addu %[dst], %[dst], $a3 \n"
+
+ // Now dst/src are both word-aligned
+ "$chk16w: \n"
+ "andi $t8, %[count], 0x3f \n" // whole 64-B chunks?
+ // t8 is the byte count after 64-byte chunks
+ "beq %[count], $t8, chk8w \n"
+ // There will be at most 1 32-byte chunk after it
+ "subu $a3, %[count], $t8 \n" // the reminder
+ // Here a3 counts bytes in 16w chunks
+ "addu $a3, %[dst], $a3 \n"
+ // Now a3 is the final dst after 64-byte chunks
+ "addu $t0, %[dst], %[count] \n"
+ // t0 is the "past the end" address
+
+ // While the loop issues "pref 30,x(a1)", a1+x must not be past
+ // the "t0-32" address
+ // This means: for x=128 the last "safe" a1 address is "t0-160";
+ // alternatively, for x=64 the last "safe" a1 address is "t0-96".
+ // We will use "pref 30,128(a1)", so "t0-160" is the limit
+ "subu $t9, $t0, 160 \n"
+ // t9 is the "last safe pref 30,128(a1)" address
+ "pref 0, 0(%[src]) \n" // first line of src
+ "pref 0, 32(%[src]) \n" // second line of src
+ "pref 0, 64(%[src]) \n"
+ "pref 30, 32(%[dst]) \n"
+ // In case a1 > t9, don't use "pref 30" at all
+ "sgtu $v1, %[dst], $t9 \n"
+ "bgtz $v1, $loop16w \n"
+ "nop \n"
+ // otherwise, start with using pref30
+ "pref 30, 64(%[dst]) \n"
+ "$loop16w: \n"
+ "pref 0, 96(%[src]) \n"
+ "lw $t0, 0(%[src]) \n"
+ "bgtz $v1, $skip_pref30_96 \n" // skip
+ "lw $t1, 4(%[src]) \n"
+ "pref 30, 96(%[dst]) \n" // continue
+ "$skip_pref30_96: \n"
+ "lw $t2, 8(%[src]) \n"
+ "lw $t3, 12(%[src]) \n"
+ "lw $t4, 16(%[src]) \n"
+ "lw $t5, 20(%[src]) \n"
+ "lw $t6, 24(%[src]) \n"
+ "lw $t7, 28(%[src]) \n"
+ "pref 0, 128(%[src]) \n"
+ // bring the next lines of src, addr 128
+ "sw $t0, 0(%[dst]) \n"
+ "sw $t1, 4(%[dst]) \n"
+ "sw $t2, 8(%[dst]) \n"
+ "sw $t3, 12(%[dst]) \n"
+ "sw $t4, 16(%[dst]) \n"
+ "sw $t5, 20(%[dst]) \n"
+ "sw $t6, 24(%[dst]) \n"
+ "sw $t7, 28(%[dst]) \n"
+ "lw $t0, 32(%[src]) \n"
+ "bgtz $v1, $skip_pref30_128 \n" // skip pref 30,128(a1)
+ "lw $t1, 36(%[src]) \n"
+ "pref 30, 128(%[dst]) \n" // set dest, addr 128
+ "$skip_pref30_128: \n"
+ "lw $t2, 40(%[src]) \n"
+ "lw $t3, 44(%[src]) \n"
+ "lw $t4, 48(%[src]) \n"
+ "lw $t5, 52(%[src]) \n"
+ "lw $t6, 56(%[src]) \n"
+ "lw $t7, 60(%[src]) \n"
+ "pref 0, 160(%[src]) \n"
+ // bring the next lines of src, addr 160
+ "sw $t0, 32(%[dst]) \n"
+ "sw $t1, 36(%[dst]) \n"
+ "sw $t2, 40(%[dst]) \n"
+ "sw $t3, 44(%[dst]) \n"
+ "sw $t4, 48(%[dst]) \n"
+ "sw $t5, 52(%[dst]) \n"
+ "sw $t6, 56(%[dst]) \n"
+ "sw $t7, 60(%[dst]) \n"
+
+ "addiu %[dst], %[dst], 64 \n" // adding 64 to dest
+ "sgtu $v1, %[dst], $t9 \n"
+ "bne %[dst], $a3, $loop16w \n"
+ " addiu %[src], %[src], 64 \n" // adding 64 to src
+ "move %[count], $t8 \n"
+
+ // Here we have src and dst word-aligned but fewer than 64 bytes to go
+
+ "chk8w: \n"
+ "pref 0, 0x0(%[src]) \n"
+ "andi $t8, %[count], 0x1f \n" // 32-byte chunk?
+ // t8 is the remainder count past 32 bytes
+ "beq %[count], $t8, chk1w \n"
+ // count == t8: no 32-byte chunk
+ " nop \n"
+
+ "lw $t0, 0(%[src]) \n"
+ "lw $t1, 4(%[src]) \n"
+ "lw $t2, 8(%[src]) \n"
+ "lw $t3, 12(%[src]) \n"
+ "lw $t4, 16(%[src]) \n"
+ "lw $t5, 20(%[src]) \n"
+ "lw $t6, 24(%[src]) \n"
+ "lw $t7, 28(%[src]) \n"
+ "addiu %[src], %[src], 32 \n"
+
+ "sw $t0, 0(%[dst]) \n"
+ "sw $t1, 4(%[dst]) \n"
+ "sw $t2, 8(%[dst]) \n"
+ "sw $t3, 12(%[dst]) \n"
+ "sw $t4, 16(%[dst]) \n"
+ "sw $t5, 20(%[dst]) \n"
+ "sw $t6, 24(%[dst]) \n"
+ "sw $t7, 28(%[dst]) \n"
+ "addiu %[dst], %[dst], 32 \n"
+
+ "chk1w: \n"
+ "andi %[count], $t8, 0x3 \n"
+ // now count is the remainder past 1w chunks
+ "beq %[count], $t8, $last8 \n"
+ " subu $a3, $t8, %[count] \n"
+ // a3 is count of bytes in 1w chunks
+ "addu $a3, %[dst], $a3 \n"
+ // now a3 is the dst address past the 1w chunks
+ // copying in words (4-byte chunks)
+ "$wordCopy_loop: \n"
+ "lw $t3, 0(%[src]) \n"
+ // the first t3 may equal t0 ... optimize?
+ "addiu %[src], %[src],4 \n"
+ "addiu %[dst], %[dst],4 \n"
+ "bne %[dst], $a3,$wordCopy_loop \n"
+ " sw $t3, -4(%[dst]) \n"
+
+ // For the last (<8) bytes
+ "$last8: \n"
+ "blez %[count], leave \n"
+ " addu $a3, %[dst], %[count] \n" // a3 -last dst address
+ "$last8loop: \n"
+ "lb $v1, 0(%[src]) \n"
+ "addiu %[src], %[src], 1 \n"
+ "addiu %[dst], %[dst], 1 \n"
+ "bne %[dst], $a3, $last8loop \n"
+ " sb $v1, -1(%[dst]) \n"
+
+ "leave: \n"
+ " j $ra \n"
+ " nop \n"
+
+ //
+ // UNALIGNED case
+ //
+
+ "unaligned: \n"
+ // got here with a3="negu a1"
+ "andi $a3, $a3, 0x3 \n" // a1 is word aligned?
+ "beqz $a3, $ua_chk16w \n"
+ " subu %[count], %[count], $a3 \n"
+ // bytes left after initial a3 bytes
+ "lwr $v1, 0(%[src]) \n"
+ "lwl $v1, 3(%[src]) \n"
+ "addu %[src], %[src], $a3 \n" // a3 may be 1, 2 or 3
+ "swr $v1, 0(%[dst]) \n"
+ "addu %[dst], %[dst], $a3 \n"
+ // below the dst will be word aligned (NOTE1)
+ "$ua_chk16w: \n"
+ "andi $t8, %[count], 0x3f \n" // whole 64-B chunks?
+ // t8 is the byte count after 64-byte chunks
+ "beq %[count], $t8, ua_chk8w \n"
+ // if count==t8, no 64-byte chunks
+ // There will be at most 1 32-byte chunk after it
+ "subu $a3, %[count], $t8 \n" // the reminder
+ // Here a3 counts bytes in 16w chunks
+ "addu $a3, %[dst], $a3 \n"
+ // Now a3 is the final dst after 64-byte chunks
+ "addu $t0, %[dst], %[count] \n" // t0 "past the end"
+ "subu $t9, $t0, 160 \n"
+ // t9 is the "last safe pref 30,128(a1)" address
+ "pref 0, 0(%[src]) \n" // first line of src
+ "pref 0, 32(%[src]) \n" // second line addr 32
+ "pref 0, 64(%[src]) \n"
+ "pref 30, 32(%[dst]) \n"
+ // safe, as we have at least 64 bytes ahead
+ // In case a1 > t9, don't use "pref 30" at all
+ "sgtu $v1, %[dst], $t9 \n"
+ "bgtz $v1, $ua_loop16w \n"
+ // skip "pref 30,64(a1)" for too short arrays
+ " nop \n"
+ // otherwise, start with using pref30
+ "pref 30, 64(%[dst]) \n"
+ "$ua_loop16w: \n"
+ "pref 0, 96(%[src]) \n"
+ "lwr $t0, 0(%[src]) \n"
+ "lwl $t0, 3(%[src]) \n"
+ "lwr $t1, 4(%[src]) \n"
+ "bgtz $v1, $ua_skip_pref30_96 \n"
+ " lwl $t1, 7(%[src]) \n"
+ "pref 30, 96(%[dst]) \n"
+ // continue setting up the dest, addr 96
+ "$ua_skip_pref30_96: \n"
+ "lwr $t2, 8(%[src]) \n"
+ "lwl $t2, 11(%[src]) \n"
+ "lwr $t3, 12(%[src]) \n"
+ "lwl $t3, 15(%[src]) \n"
+ "lwr $t4, 16(%[src]) \n"
+ "lwl $t4, 19(%[src]) \n"
+ "lwr $t5, 20(%[src]) \n"
+ "lwl $t5, 23(%[src]) \n"
+ "lwr $t6, 24(%[src]) \n"
+ "lwl $t6, 27(%[src]) \n"
+ "lwr $t7, 28(%[src]) \n"
+ "lwl $t7, 31(%[src]) \n"
+ "pref 0, 128(%[src]) \n"
+ // bring the next lines of src, addr 128
+ "sw $t0, 0(%[dst]) \n"
+ "sw $t1, 4(%[dst]) \n"
+ "sw $t2, 8(%[dst]) \n"
+ "sw $t3, 12(%[dst]) \n"
+ "sw $t4, 16(%[dst]) \n"
+ "sw $t5, 20(%[dst]) \n"
+ "sw $t6, 24(%[dst]) \n"
+ "sw $t7, 28(%[dst]) \n"
+ "lwr $t0, 32(%[src]) \n"
+ "lwl $t0, 35(%[src]) \n"
+ "lwr $t1, 36(%[src]) \n"
+ "bgtz $v1, ua_skip_pref30_128 \n"
+ " lwl $t1, 39(%[src]) \n"
+ "pref 30, 128(%[dst]) \n"
+ // continue setting up the dest, addr 128
+ "ua_skip_pref30_128: \n"
+
+ "lwr $t2, 40(%[src]) \n"
+ "lwl $t2, 43(%[src]) \n"
+ "lwr $t3, 44(%[src]) \n"
+ "lwl $t3, 47(%[src]) \n"
+ "lwr $t4, 48(%[src]) \n"
+ "lwl $t4, 51(%[src]) \n"
+ "lwr $t5, 52(%[src]) \n"
+ "lwl $t5, 55(%[src]) \n"
+ "lwr $t6, 56(%[src]) \n"
+ "lwl $t6, 59(%[src]) \n"
+ "lwr $t7, 60(%[src]) \n"
+ "lwl $t7, 63(%[src]) \n"
+ "pref 0, 160(%[src]) \n"
+ // bring the next lines of src, addr 160
+ "sw $t0, 32(%[dst]) \n"
+ "sw $t1, 36(%[dst]) \n"
+ "sw $t2, 40(%[dst]) \n"
+ "sw $t3, 44(%[dst]) \n"
+ "sw $t4, 48(%[dst]) \n"
+ "sw $t5, 52(%[dst]) \n"
+ "sw $t6, 56(%[dst]) \n"
+ "sw $t7, 60(%[dst]) \n"
+
+ "addiu %[dst],%[dst],64 \n" // adding 64 to dest
+ "sgtu $v1,%[dst],$t9 \n"
+ "bne %[dst],$a3,$ua_loop16w \n"
+ " addiu %[src],%[src],64 \n" // adding 64 to src
+ "move %[count],$t8 \n"
+
+ // Here we have src and dst word-aligned but fewer than 64 bytes to go
+
+ "ua_chk8w: \n"
+ "pref 0, 0x0(%[src]) \n"
+ "andi $t8, %[count], 0x1f \n" // 32-byte chunk?
+ // t8 is the remainder count
+ "beq %[count], $t8, $ua_chk1w \n"
+ // when count==t8, no 32-byte chunk
+
+ "lwr $t0, 0(%[src]) \n"
+ "lwl $t0, 3(%[src]) \n"
+ "lwr $t1, 4(%[src]) \n"
+ "lwl $t1, 7(%[src]) \n"
+ "lwr $t2, 8(%[src]) \n"
+ "lwl $t2, 11(%[src]) \n"
+ "lwr $t3, 12(%[src]) \n"
+ "lwl $t3, 15(%[src]) \n"
+ "lwr $t4, 16(%[src]) \n"
+ "lwl $t4, 19(%[src]) \n"
+ "lwr $t5, 20(%[src]) \n"
+ "lwl $t5, 23(%[src]) \n"
+ "lwr $t6, 24(%[src]) \n"
+ "lwl $t6, 27(%[src]) \n"
+ "lwr $t7, 28(%[src]) \n"
+ "lwl $t7, 31(%[src]) \n"
+ "addiu %[src], %[src], 32 \n"
+
+ "sw $t0, 0(%[dst]) \n"
+ "sw $t1, 4(%[dst]) \n"
+ "sw $t2, 8(%[dst]) \n"
+ "sw $t3, 12(%[dst]) \n"
+ "sw $t4, 16(%[dst]) \n"
+ "sw $t5, 20(%[dst]) \n"
+ "sw $t6, 24(%[dst]) \n"
+ "sw $t7, 28(%[dst]) \n"
+ "addiu %[dst], %[dst], 32 \n"
+
+ "$ua_chk1w: \n"
+ "andi %[count], $t8, 0x3 \n"
+ // now count is the remainder past 1w chunks
+ "beq %[count], $t8, ua_smallCopy \n"
+ "subu $a3, $t8, %[count] \n"
+ // a3 is count of bytes in 1w chunks
+ "addu $a3, %[dst], $a3 \n"
+ // now a3 is the dst address past the 1w chunks
+
+ // copying in words (4-byte chunks)
+ "$ua_wordCopy_loop: \n"
+ "lwr $v1, 0(%[src]) \n"
+ "lwl $v1, 3(%[src]) \n"
+ "addiu %[src], %[src], 4 \n"
+ "addiu %[dst], %[dst], 4 \n"
+ // note: dst=a1 is word aligned here, see NOTE1
+ "bne %[dst], $a3, $ua_wordCopy_loop \n"
+ " sw $v1,-4(%[dst]) \n"
+
+ // Now less than 4 bytes (value in count) left to copy
+ "ua_smallCopy: \n"
+ "beqz %[count], leave \n"
+ " addu $a3, %[dst], %[count] \n" // a3 = last dst address
+ "$ua_smallCopy_loop: \n"
+ "lb $v1, 0(%[src]) \n"
+ "addiu %[src], %[src], 1 \n"
+ "addiu %[dst], %[dst], 1 \n"
+ "bne %[dst],$a3,$ua_smallCopy_loop \n"
+ " sb $v1, -1(%[dst]) \n"
+
+ "j $ra \n"
+ " nop \n"
+ ".set at \n"
+ ".set reorder \n"
+ : [dst] "+r" (dst), [src] "+r" (src)
+ : [count] "r" (count)
+ : "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7",
+ "t8", "t9", "a3", "v1", "at"
+ );
+}
+#endif // HAS_COPYROW_MIPS
+
+// MIPS DSPR2 functions
+#if !defined(LIBYUV_DISABLE_MIPS) && defined(__mips_dsp) && \
+ (__mips_dsp_rev >= 2) && \
+ (_MIPS_SIM == _MIPS_SIM_ABI32) && (__mips_isa_rev < 6)
+
+void SplitUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ "srl $t4, %[width], 4 \n" // multiplies of 16
+ "blez $t4, 2f \n"
+ " andi %[width], %[width], 0xf \n" // residual
+
+ ".p2align 2 \n"
+ "1: \n"
+ "addiu $t4, $t4, -1 \n"
+ "lw $t0, 0(%[src_uv]) \n" // V1 | U1 | V0 | U0
+ "lw $t1, 4(%[src_uv]) \n" // V3 | U3 | V2 | U2
+ "lw $t2, 8(%[src_uv]) \n" // V5 | U5 | V4 | U4
+ "lw $t3, 12(%[src_uv]) \n" // V7 | U7 | V6 | U6
+ "lw $t5, 16(%[src_uv]) \n" // V9 | U9 | V8 | U8
+ "lw $t6, 20(%[src_uv]) \n" // V11 | U11 | V10 | U10
+ "lw $t7, 24(%[src_uv]) \n" // V13 | U13 | V12 | U12
+ "lw $t8, 28(%[src_uv]) \n" // V15 | U15 | V14 | U14
+ "addiu %[src_uv], %[src_uv], 32 \n"
+ "precrq.qb.ph $t9, $t1, $t0 \n" // V3 | V2 | V1 | V0
+ "precr.qb.ph $t0, $t1, $t0 \n" // U3 | U2 | U1 | U0
+ "precrq.qb.ph $t1, $t3, $t2 \n" // V7 | V6 | V5 | V4
+ "precr.qb.ph $t2, $t3, $t2 \n" // U7 | U6 | U5 | U4
+ "precrq.qb.ph $t3, $t6, $t5 \n" // V11 | V10 | V9 | V8
+ "precr.qb.ph $t5, $t6, $t5 \n" // U11 | U10 | U9 | U8
+ "precrq.qb.ph $t6, $t8, $t7 \n" // V15 | V14 | V13 | V12
+ "precr.qb.ph $t7, $t8, $t7 \n" // U15 | U14 | U13 | U12
+ "sw $t9, 0(%[dst_v]) \n"
+ "sw $t0, 0(%[dst_u]) \n"
+ "sw $t1, 4(%[dst_v]) \n"
+ "sw $t2, 4(%[dst_u]) \n"
+ "sw $t3, 8(%[dst_v]) \n"
+ "sw $t5, 8(%[dst_u]) \n"
+ "sw $t6, 12(%[dst_v]) \n"
+ "sw $t7, 12(%[dst_u]) \n"
+ "addiu %[dst_v], %[dst_v], 16 \n"
+ "bgtz $t4, 1b \n"
+ " addiu %[dst_u], %[dst_u], 16 \n"
+
+ "beqz %[width], 3f \n"
+ " nop \n"
+
+ "2: \n"
+ "lbu $t0, 0(%[src_uv]) \n"
+ "lbu $t1, 1(%[src_uv]) \n"
+ "addiu %[src_uv], %[src_uv], 2 \n"
+ "addiu %[width], %[width], -1 \n"
+ "sb $t0, 0(%[dst_u]) \n"
+ "sb $t1, 0(%[dst_v]) \n"
+ "addiu %[dst_u], %[dst_u], 1 \n"
+ "bgtz %[width], 2b \n"
+ " addiu %[dst_v], %[dst_v], 1 \n"
+
+ "3: \n"
+ ".set pop \n"
+ : [src_uv] "+r" (src_uv),
+ [width] "+r" (width),
+ [dst_u] "+r" (dst_u),
+ [dst_v] "+r" (dst_v)
+ :
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7", "t8", "t9"
+ );
+}
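+
+// A scalar sketch (not compiled) of the deinterleave above: the DSPR2
+// precr/precrq pairs pull the even (U) and odd (V) bytes apart four at a
+// time; byte-at-a-time it is simply the following. SplitUVRow_Sketch is a
+// hypothetical name.
+#if 0
+static void SplitUVRow_Sketch(const uint8* src_uv, uint8* dst_u,
+                              uint8* dst_v, int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst_u[x] = src_uv[x * 2];      // even bytes are U.
+    dst_v[x] = src_uv[x * 2 + 1];  // odd bytes are V.
+  }
+}
+#endif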
+
+void MirrorRow_MIPS_DSPR2(const uint8* src, uint8* dst, int width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ "srl $t4, %[width], 4 \n" // multiplies of 16
+ "andi $t5, %[width], 0xf \n"
+ "blez $t4, 2f \n"
+ " addu %[src], %[src], %[width] \n" // src += width
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, -16(%[src]) \n" // |3|2|1|0|
+ "lw $t1, -12(%[src]) \n" // |7|6|5|4|
+ "lw $t2, -8(%[src]) \n" // |11|10|9|8|
+ "lw $t3, -4(%[src]) \n" // |15|14|13|12|
+ "wsbh $t0, $t0 \n" // |2|3|0|1|
+ "wsbh $t1, $t1 \n" // |6|7|4|5|
+ "wsbh $t2, $t2 \n" // |10|11|8|9|
+ "wsbh $t3, $t3 \n" // |14|15|12|13|
+ "rotr $t0, $t0, 16 \n" // |0|1|2|3|
+ "rotr $t1, $t1, 16 \n" // |4|5|6|7|
+ "rotr $t2, $t2, 16 \n" // |8|9|10|11|
+ "rotr $t3, $t3, 16 \n" // |12|13|14|15|
+ "addiu %[src], %[src], -16 \n"
+ "addiu $t4, $t4, -1 \n"
+ "sw $t3, 0(%[dst]) \n" // |15|14|13|12|
+ "sw $t2, 4(%[dst]) \n" // |11|10|9|8|
+ "sw $t1, 8(%[dst]) \n" // |7|6|5|4|
+ "sw $t0, 12(%[dst]) \n" // |3|2|1|0|
+ "bgtz $t4, 1b \n"
+ " addiu %[dst], %[dst], 16 \n"
+ "beqz $t5, 3f \n"
+ " nop \n"
+
+ "2: \n"
+ "lbu $t0, -1(%[src]) \n"
+ "addiu $t5, $t5, -1 \n"
+ "addiu %[src], %[src], -1 \n"
+ "sb $t0, 0(%[dst]) \n"
+ "bgez $t5, 2b \n"
+ " addiu %[dst], %[dst], 1 \n"
+
+ "3: \n"
+ ".set pop \n"
+ : [src] "+r" (src), [dst] "+r" (dst)
+ : [width] "r" (width)
+ : "t0", "t1", "t2", "t3", "t4", "t5"
+ );
+}
+
+void MirrorUVRow_MIPS_DSPR2(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width) {
+ int x = 0;
+ int y = 0;
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ "addu $t4, %[width], %[width] \n"
+ "srl %[x], %[width], 4 \n"
+ "andi %[y], %[width], 0xf \n"
+ "blez %[x], 2f \n"
+ " addu %[src_uv], %[src_uv], $t4 \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, -32(%[src_uv]) \n" // |3|2|1|0|
+ "lw $t1, -28(%[src_uv]) \n" // |7|6|5|4|
+ "lw $t2, -24(%[src_uv]) \n" // |11|10|9|8|
+ "lw $t3, -20(%[src_uv]) \n" // |15|14|13|12|
+ "lw $t4, -16(%[src_uv]) \n" // |19|18|17|16|
+ "lw $t6, -12(%[src_uv]) \n" // |23|22|21|20|
+ "lw $t7, -8(%[src_uv]) \n" // |27|26|25|24|
+ "lw $t8, -4(%[src_uv]) \n" // |31|30|29|28|
+
+ "rotr $t0, $t0, 16 \n" // |1|0|3|2|
+ "rotr $t1, $t1, 16 \n" // |5|4|7|6|
+ "rotr $t2, $t2, 16 \n" // |9|8|11|10|
+ "rotr $t3, $t3, 16 \n" // |13|12|15|14|
+ "rotr $t4, $t4, 16 \n" // |17|16|19|18|
+ "rotr $t6, $t6, 16 \n" // |21|20|23|22|
+ "rotr $t7, $t7, 16 \n" // |25|24|27|26|
+ "rotr $t8, $t8, 16 \n" // |29|28|31|30|
+ "precr.qb.ph $t9, $t0, $t1 \n" // |0|2|4|6|
+ "precrq.qb.ph $t5, $t0, $t1 \n" // |1|3|5|7|
+ "precr.qb.ph $t0, $t2, $t3 \n" // |8|10|12|14|
+ "precrq.qb.ph $t1, $t2, $t3 \n" // |9|11|13|15|
+ "precr.qb.ph $t2, $t4, $t6 \n" // |16|18|20|22|
+ "precrq.qb.ph $t3, $t4, $t6 \n" // |17|19|21|23|
+ "precr.qb.ph $t4, $t7, $t8 \n" // |24|26|28|30|
+ "precrq.qb.ph $t6, $t7, $t8 \n" // |25|27|29|31|
+ "addiu %[src_uv], %[src_uv], -32 \n"
+ "addiu %[x], %[x], -1 \n"
+ "swr $t4, 0(%[dst_u]) \n"
+ "swl $t4, 3(%[dst_u]) \n" // |30|28|26|24|
+ "swr $t6, 0(%[dst_v]) \n"
+ "swl $t6, 3(%[dst_v]) \n" // |31|29|27|25|
+ "swr $t2, 4(%[dst_u]) \n"
+ "swl $t2, 7(%[dst_u]) \n" // |22|20|18|16|
+ "swr $t3, 4(%[dst_v]) \n"
+ "swl $t3, 7(%[dst_v]) \n" // |23|21|19|17|
+ "swr $t0, 8(%[dst_u]) \n"
+ "swl $t0, 11(%[dst_u]) \n" // |14|12|10|8|
+ "swr $t1, 8(%[dst_v]) \n"
+ "swl $t1, 11(%[dst_v]) \n" // |15|13|11|9|
+ "swr $t9, 12(%[dst_u]) \n"
+ "swl $t9, 15(%[dst_u]) \n" // |6|4|2|0|
+ "swr $t5, 12(%[dst_v]) \n"
+ "swl $t5, 15(%[dst_v]) \n" // |7|5|3|1|
+ "addiu %[dst_v], %[dst_v], 16 \n"
+ "bgtz %[x], 1b \n"
+ " addiu %[dst_u], %[dst_u], 16 \n"
+ "beqz %[y], 3f \n"
+ " nop \n"
+ "b 2f \n"
+ " nop \n"
+
+ "2: \n"
+ "lbu $t0, -2(%[src_uv]) \n"
+ "lbu $t1, -1(%[src_uv]) \n"
+ "addiu %[src_uv], %[src_uv], -2 \n"
+ "addiu %[y], %[y], -1 \n"
+ "sb $t0, 0(%[dst_u]) \n"
+ "sb $t1, 0(%[dst_v]) \n"
+ "addiu %[dst_u], %[dst_u], 1 \n"
+ "bgtz %[y], 2b \n"
+ " addiu %[dst_v], %[dst_v], 1 \n"
+
+ "3: \n"
+ ".set pop \n"
+ : [src_uv] "+r" (src_uv),
+ [dst_u] "+r" (dst_u),
+ [dst_v] "+r" (dst_v),
+ [x] "=&r" (x),
+ [y] "+r" (y)
+ : [width] "r" (width)
+ : "t0", "t1", "t2", "t3", "t4",
+ "t5", "t7", "t8", "t9"
+ );
+}
+
+// Convert 4 Y and 2 UV pairs from I422 and arrange the RGB values into
+// t5 = | 0 | B0 | 0 | b0 |
+// t4 = | 0 | B1 | 0 | b1 |
+// t9 = | 0 | G0 | 0 | g0 |
+// t8 = | 0 | G1 | 0 | g1 |
+// t2 = | 0 | R0 | 0 | r0 |
+// t1 = | 0 | R1 | 0 | r1 |
+#define I422ToTransientMipsRGB \
+ "lw $t0, 0(%[y_buf]) \n" \
+ "lhu $t1, 0(%[u_buf]) \n" \
+ "lhu $t2, 0(%[v_buf]) \n" \
+ "preceu.ph.qbr $t1, $t1 \n" \
+ "preceu.ph.qbr $t2, $t2 \n" \
+ "preceu.ph.qbra $t3, $t0 \n" \
+ "preceu.ph.qbla $t0, $t0 \n" \
+ "subu.ph $t1, $t1, $s5 \n" \
+ "subu.ph $t2, $t2, $s5 \n" \
+ "subu.ph $t3, $t3, $s4 \n" \
+ "subu.ph $t0, $t0, $s4 \n" \
+ "mul.ph $t3, $t3, $s0 \n" \
+ "mul.ph $t0, $t0, $s0 \n" \
+ "shll.ph $t4, $t1, 0x7 \n" \
+ "subu.ph $t4, $t4, $t1 \n" \
+ "mul.ph $t6, $t1, $s1 \n" \
+ "mul.ph $t1, $t2, $s2 \n" \
+ "addq_s.ph $t5, $t4, $t3 \n" \
+ "addq_s.ph $t4, $t4, $t0 \n" \
+ "shra.ph $t5, $t5, 6 \n" \
+ "shra.ph $t4, $t4, 6 \n" \
+ "addiu %[u_buf], 2 \n" \
+ "addiu %[v_buf], 2 \n" \
+ "addu.ph $t6, $t6, $t1 \n" \
+ "mul.ph $t1, $t2, $s3 \n" \
+ "addu.ph $t9, $t6, $t3 \n" \
+ "addu.ph $t8, $t6, $t0 \n" \
+ "shra.ph $t9, $t9, 6 \n" \
+ "shra.ph $t8, $t8, 6 \n" \
+ "addu.ph $t2, $t1, $t3 \n" \
+ "addu.ph $t1, $t1, $t0 \n" \
+ "shra.ph $t2, $t2, 6 \n" \
+ "shra.ph $t1, $t1, 6 \n" \
+ "subu.ph $t5, $t5, $s5 \n" \
+ "subu.ph $t4, $t4, $s5 \n" \
+ "subu.ph $t9, $t9, $s5 \n" \
+ "subu.ph $t8, $t8, $s5 \n" \
+ "subu.ph $t2, $t2, $s5 \n" \
+ "subu.ph $t1, $t1, $s5 \n" \
+ "shll_s.ph $t5, $t5, 8 \n" \
+ "shll_s.ph $t4, $t4, 8 \n" \
+ "shll_s.ph $t9, $t9, 8 \n" \
+ "shll_s.ph $t8, $t8, 8 \n" \
+ "shll_s.ph $t2, $t2, 8 \n" \
+ "shll_s.ph $t1, $t1, 8 \n" \
+ "shra.ph $t5, $t5, 8 \n" \
+ "shra.ph $t4, $t4, 8 \n" \
+ "shra.ph $t9, $t9, 8 \n" \
+ "shra.ph $t8, $t8, 8 \n" \
+ "shra.ph $t2, $t2, 8 \n" \
+ "shra.ph $t1, $t1, 8 \n" \
+ "addu.ph $t5, $t5, $s5 \n" \
+ "addu.ph $t4, $t4, $s5 \n" \
+ "addu.ph $t9, $t9, $s5 \n" \
+ "addu.ph $t8, $t8, $s5 \n" \
+ "addu.ph $t2, $t2, $s5 \n" \
+ "addu.ph $t1, $t1, $s5 \n"
+
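+// A scalar sketch (not compiled) of the fixed-point conversion the macro
+// performs per sample, using its 6-bit coefficients (YG=74, UG=-25, VG=-52,
+// VR=102, and (u << 7) - u = u * 127 for blue) and a final clamp to 0..255,
+// which the subu/shll_s/shra/addu sequence implements around the 128 bias in
+// $s5. YuvToRgb_Sketch and Clamp255_Sketch are hypothetical names.
+#if 0
+static uint8 Clamp255_Sketch(int v) {
+  return (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
+}
+
+static void YuvToRgb_Sketch(uint8 y, uint8 u, uint8 v,
+                            uint8* b, uint8* g, uint8* r) {
+  int y1 = (y - 16) * 74;  // YG
+  int u1 = u - 128;
+  int v1 = v - 128;
+  *b = Clamp255_Sketch((y1 + u1 * 127) >> 6);
+  *g = Clamp255_Sketch((y1 - u1 * 25 - v1 * 52) >> 6);
+  *r = Clamp255_Sketch((y1 + v1 * 102) >> 6);
+}
+#endif
+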
+void I422ToARGBRow_MIPS_DSPR2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ "beqz %[width], 2f \n"
+ " repl.ph $s0, 74 \n" // |YG|YG| = |74|74|
+ "repl.ph $s1, -25 \n" // |UG|UG| = |-25|-25|
+ "repl.ph $s2, -52 \n" // |VG|VG| = |-52|-52|
+ "repl.ph $s3, 102 \n" // |VR|VR| = |102|102|
+ "repl.ph $s4, 16 \n" // |0|16|0|16|
+ "repl.ph $s5, 128 \n" // |128|128| // clipping
+ "lui $s6, 0xff00 \n"
+ "ori $s6, 0xff00 \n" // |ff|00|ff|00|ff|
+
+ ".p2align 2 \n"
+ "1: \n"
+ I422ToTransientMipsRGB
+// Arranging into argb format
+ "precr.qb.ph $t4, $t8, $t4 \n" // |G1|g1|B1|b1|
+ "precr.qb.ph $t5, $t9, $t5 \n" // |G0|g0|B0|b0|
+ "addiu %[width], -4 \n"
+ "precrq.qb.ph $t8, $t4, $t5 \n" // |G1|B1|G0|B0|
+ "precr.qb.ph $t9, $t4, $t5 \n" // |g1|b1|g0|b0|
+ "precr.qb.ph $t2, $t1, $t2 \n" // |R1|r1|R0|r0|
+
+ "addiu %[y_buf], 4 \n"
+ "preceu.ph.qbla $t1, $t2 \n" // |0 |R1|0 |R0|
+ "preceu.ph.qbra $t2, $t2 \n" // |0 |r1|0 |r0|
+ "or $t1, $t1, $s6 \n" // |ff|R1|ff|R0|
+ "or $t2, $t2, $s6 \n" // |ff|r1|ff|r0|
+ "precrq.ph.w $t0, $t2, $t9 \n" // |ff|r1|g1|b1|
+ "precrq.ph.w $t3, $t1, $t8 \n" // |ff|R1|G1|B1|
+ "sll $t9, $t9, 16 \n"
+ "sll $t8, $t8, 16 \n"
+ "packrl.ph $t2, $t2, $t9 \n" // |ff|r0|g0|b0|
+ "packrl.ph $t1, $t1, $t8 \n" // |ff|R0|G0|B0|
+// Store results.
+ "sw $t2, 0(%[rgb_buf]) \n"
+ "sw $t0, 4(%[rgb_buf]) \n"
+ "sw $t1, 8(%[rgb_buf]) \n"
+ "sw $t3, 12(%[rgb_buf]) \n"
+ "bnez %[width], 1b \n"
+ " addiu %[rgb_buf], 16 \n"
+ "2: \n"
+ ".set pop \n"
+ :[y_buf] "+r" (y_buf),
+ [u_buf] "+r" (u_buf),
+ [v_buf] "+r" (v_buf),
+ [width] "+r" (width),
+ [rgb_buf] "+r" (rgb_buf)
+ :
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9",
+ "s0", "s1", "s2", "s3",
+ "s4", "s5", "s6"
+ );
+}
+
+void I422ToABGRRow_MIPS_DSPR2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ "beqz %[width], 2f \n"
+ " repl.ph $s0, 74 \n" // |YG|YG| = |74|74|
+ "repl.ph $s1, -25 \n" // |UG|UG| = |-25|-25|
+ "repl.ph $s2, -52 \n" // |VG|VG| = |-52|-52|
+ "repl.ph $s3, 102 \n" // |VR|VR| = |102|102|
+ "repl.ph $s4, 16 \n" // |0|16|0|16|
+ "repl.ph $s5, 128 \n" // |128|128|
+ "lui $s6, 0xff00 \n"
+ "ori $s6, 0xff00 \n" // |ff|00|ff|00|
+
+ ".p2align 2 \n"
+ "1: \n"
+ I422ToTransientMipsRGB
+// Arranging into abgr format
+ "precr.qb.ph $t0, $t8, $t1 \n" // |G1|g1|R1|r1|
+ "precr.qb.ph $t3, $t9, $t2 \n" // |G0|g0|R0|r0|
+ "precrq.qb.ph $t8, $t0, $t3 \n" // |G1|R1|G0|R0|
+ "precr.qb.ph $t9, $t0, $t3 \n" // |g1|r1|g0|r0|
+
+ "precr.qb.ph $t2, $t4, $t5 \n" // |B1|b1|B0|b0|
+ "addiu %[width], -4 \n"
+ "addiu %[y_buf], 4 \n"
+ "preceu.ph.qbla $t1, $t2 \n" // |0 |B1|0 |B0|
+ "preceu.ph.qbra $t2, $t2 \n" // |0 |b1|0 |b0|
+ "or $t1, $t1, $s6 \n" // |ff|B1|ff|B0|
+ "or $t2, $t2, $s6 \n" // |ff|b1|ff|b0|
+ "precrq.ph.w $t0, $t2, $t9 \n" // |ff|b1|g1|r1|
+ "precrq.ph.w $t3, $t1, $t8 \n" // |ff|B1|G1|R1|
+ "sll $t9, $t9, 16 \n"
+ "sll $t8, $t8, 16 \n"
+ "packrl.ph $t2, $t2, $t9 \n" // |ff|b0|g0|r0|
+ "packrl.ph $t1, $t1, $t8 \n" // |ff|B0|G0|R0|
+// Store results.
+ "sw $t2, 0(%[rgb_buf]) \n"
+ "sw $t0, 4(%[rgb_buf]) \n"
+ "sw $t1, 8(%[rgb_buf]) \n"
+ "sw $t3, 12(%[rgb_buf]) \n"
+ "bnez %[width], 1b \n"
+ " addiu %[rgb_buf], 16 \n"
+ "2: \n"
+ ".set pop \n"
+ :[y_buf] "+r" (y_buf),
+ [u_buf] "+r" (u_buf),
+ [v_buf] "+r" (v_buf),
+ [width] "+r" (width),
+ [rgb_buf] "+r" (rgb_buf)
+ :
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9",
+ "s0", "s1", "s2", "s3",
+ "s4", "s5", "s6"
+ );
+}
+
+void I422ToBGRARow_MIPS_DSPR2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ "beqz %[width], 2f \n"
+ " repl.ph $s0, 74 \n" // |YG|YG| = |74 |74 |
+ "repl.ph $s1, -25 \n" // |UG|UG| = |-25|-25|
+ "repl.ph $s2, -52 \n" // |VG|VG| = |-52|-52|
+ "repl.ph $s3, 102 \n" // |VR|VR| = |102|102|
+ "repl.ph $s4, 16 \n" // |0|16|0|16|
+ "repl.ph $s5, 128 \n" // |128|128|
+ "lui $s6, 0xff \n"
+ "ori $s6, 0xff \n" // |00|ff|00|ff|
+
+ ".p2align 2 \n"
+ "1: \n"
+ I422ToTransientMipsRGB
+ // Arranging into bgra format
+ "precr.qb.ph $t4, $t4, $t8 \n" // |B1|b1|G1|g1|
+ "precr.qb.ph $t5, $t5, $t9 \n" // |B0|b0|G0|g0|
+ "precrq.qb.ph $t8, $t4, $t5 \n" // |B1|G1|B0|G0|
+ "precr.qb.ph $t9, $t4, $t5 \n" // |b1|g1|b0|g0|
+
+ "precr.qb.ph $t2, $t1, $t2 \n" // |R1|r1|R0|r0|
+ "addiu %[width], -4 \n"
+ "addiu %[y_buf], 4 \n"
+ "preceu.ph.qbla $t1, $t2 \n" // |0 |R1|0 |R0|
+ "preceu.ph.qbra $t2, $t2 \n" // |0 |r1|0 |r0|
+ "sll $t1, $t1, 8 \n" // |R1|0 |R0|0 |
+ "sll $t2, $t2, 8 \n" // |r1|0 |r0|0 |
+ "or $t1, $t1, $s6 \n" // |R1|ff|R0|ff|
+ "or $t2, $t2, $s6 \n" // |r1|ff|r0|ff|
+ "precrq.ph.w $t0, $t9, $t2 \n" // |b1|g1|r1|ff|
+ "precrq.ph.w $t3, $t8, $t1 \n" // |B1|G1|R1|ff|
+ "sll $t1, $t1, 16 \n"
+ "sll $t2, $t2, 16 \n"
+ "packrl.ph $t2, $t9, $t2 \n" // |b0|g0|r0|ff|
+ "packrl.ph $t1, $t8, $t1 \n" // |B0|G0|R0|ff|
+// Store results.
+ "sw $t2, 0(%[rgb_buf]) \n"
+ "sw $t0, 4(%[rgb_buf]) \n"
+ "sw $t1, 8(%[rgb_buf]) \n"
+ "sw $t3, 12(%[rgb_buf]) \n"
+ "bnez %[width], 1b \n"
+ " addiu %[rgb_buf], 16 \n"
+ "2: \n"
+ ".set pop \n"
+ :[y_buf] "+r" (y_buf),
+ [u_buf] "+r" (u_buf),
+ [v_buf] "+r" (v_buf),
+ [width] "+r" (width),
+ [rgb_buf] "+r" (rgb_buf)
+ :
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9",
+ "s0", "s1", "s2", "s3",
+ "s4", "s5", "s6"
+ );
+}
+
+// Bilinear filter 8x2 -> 8x1
+void InterpolateRow_MIPS_DSPR2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) {
+ int y0_fraction = 256 - source_y_fraction;
+ const uint8* src_ptr1 = src_ptr + src_stride;
+
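+  // A scalar sketch of the filter computed below, per byte
+  // (illustration; the asm handles 8 bytes per iteration):
+  //   dst_ptr[i] = (src_ptr[i] * y0_fraction +
+  //                 src_ptr1[i] * source_y_fraction) >> 8;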
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ "replv.ph $t0, %[y0_fraction] \n"
+ "replv.ph $t1, %[source_y_fraction] \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t2, 0(%[src_ptr]) \n"
+ "lw $t3, 0(%[src_ptr1]) \n"
+ "lw $t4, 4(%[src_ptr]) \n"
+ "lw $t5, 4(%[src_ptr1]) \n"
+ "muleu_s.ph.qbl $t6, $t2, $t0 \n"
+ "muleu_s.ph.qbr $t7, $t2, $t0 \n"
+ "muleu_s.ph.qbl $t8, $t3, $t1 \n"
+ "muleu_s.ph.qbr $t9, $t3, $t1 \n"
+ "muleu_s.ph.qbl $t2, $t4, $t0 \n"
+ "muleu_s.ph.qbr $t3, $t4, $t0 \n"
+ "muleu_s.ph.qbl $t4, $t5, $t1 \n"
+ "muleu_s.ph.qbr $t5, $t5, $t1 \n"
+ "addq.ph $t6, $t6, $t8 \n"
+ "addq.ph $t7, $t7, $t9 \n"
+ "addq.ph $t2, $t2, $t4 \n"
+ "addq.ph $t3, $t3, $t5 \n"
+ "shra.ph $t6, $t6, 8 \n"
+ "shra.ph $t7, $t7, 8 \n"
+ "shra.ph $t2, $t2, 8 \n"
+ "shra.ph $t3, $t3, 8 \n"
+ "precr.qb.ph $t6, $t6, $t7 \n"
+ "precr.qb.ph $t2, $t2, $t3 \n"
+ "addiu %[src_ptr], %[src_ptr], 8 \n"
+ "addiu %[src_ptr1], %[src_ptr1], 8 \n"
+ "addiu %[dst_width], %[dst_width], -8 \n"
+ "sw $t6, 0(%[dst_ptr]) \n"
+ "sw $t2, 4(%[dst_ptr]) \n"
+ "bgtz %[dst_width], 1b \n"
+ " addiu %[dst_ptr], %[dst_ptr], 8 \n"
+
+ ".set pop \n"
+ : [dst_ptr] "+r" (dst_ptr),
+ [src_ptr1] "+r" (src_ptr1),
+ [src_ptr] "+r" (src_ptr),
+ [dst_width] "+r" (dst_width)
+ : [source_y_fraction] "r" (source_y_fraction),
+ [y0_fraction] "r" (y0_fraction),
+ [src_stride] "r" (src_stride)
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9"
+ );
+}
+#endif // __mips_dsp_rev >= 2
+
+#endif // defined(__mips__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/row_neon.cc b/media/libaom/src/third_party/libyuv/source/row_neon.cc
new file mode 100644
index 000000000..1a72eb903
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/row_neon.cc
@@ -0,0 +1,3084 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC Neon
+#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
+ !defined(__aarch64__)
+
+// Read 8 Y, 4 U and 4 V from 422
+#define READYUV422 \
+ MEMACCESS(0) \
+ "vld1.8 {d0}, [%0]! \n" \
+ MEMACCESS(1) \
+ "vld1.32 {d2[0]}, [%1]! \n" \
+ MEMACCESS(2) \
+ "vld1.32 {d2[1]}, [%2]! \n"
+
+// Read 8 Y, 2 U and 2 V from 411
+#define READYUV411 \
+ MEMACCESS(0) \
+ "vld1.8 {d0}, [%0]! \n" \
+ MEMACCESS(1) \
+ "vld1.16 {d2[0]}, [%1]! \n" \
+ MEMACCESS(2) \
+ "vld1.16 {d2[1]}, [%2]! \n" \
+ "vmov.u8 d3, d2 \n" \
+ "vzip.u8 d2, d3 \n"
+
+// Read 8 Y, 8 U and 8 V from 444
+#define READYUV444 \
+ MEMACCESS(0) \
+ "vld1.8 {d0}, [%0]! \n" \
+ MEMACCESS(1) \
+ "vld1.8 {d2}, [%1]! \n" \
+ MEMACCESS(2) \
+ "vld1.8 {d3}, [%2]! \n" \
+ "vpaddl.u8 q1, q1 \n" \
+ "vrshrn.u16 d2, q1, #1 \n"
+
+// Read 8 Y, and set 4 U and 4 V to 128
+#define READYUV400 \
+ MEMACCESS(0) \
+ "vld1.8 {d0}, [%0]! \n" \
+ "vmov.u8 d2, #128 \n"
+
+// Read 8 Y and 4 UV from NV12
+#define READNV12 \
+ MEMACCESS(0) \
+ "vld1.8 {d0}, [%0]! \n" \
+ MEMACCESS(1) \
+ "vld1.8 {d2}, [%1]! \n" \
+ "vmov.u8 d3, d2 \n"/* split odd/even uv apart */\
+ "vuzp.u8 d2, d3 \n" \
+ "vtrn.u32 d2, d3 \n"
+
+// Read 8 Y and 4 VU from NV21
+#define READNV21 \
+ MEMACCESS(0) \
+ "vld1.8 {d0}, [%0]! \n" \
+ MEMACCESS(1) \
+ "vld1.8 {d2}, [%1]! \n" \
+ "vmov.u8 d3, d2 \n"/* split odd/even uv apart */\
+ "vuzp.u8 d3, d2 \n" \
+ "vtrn.u32 d2, d3 \n"
+
+// Read 8 YUY2
+#define READYUY2 \
+ MEMACCESS(0) \
+ "vld2.8 {d0, d2}, [%0]! \n" \
+ "vmov.u8 d3, d2 \n" \
+ "vuzp.u8 d2, d3 \n" \
+ "vtrn.u32 d2, d3 \n"
+
+// Read 8 UYVY
+#define READUYVY \
+ MEMACCESS(0) \
+ "vld2.8 {d2, d3}, [%0]! \n" \
+ "vmov.u8 d0, d3 \n" \
+ "vmov.u8 d3, d2 \n" \
+ "vuzp.u8 d2, d3 \n" \
+ "vtrn.u32 d2, d3 \n"
+
+#define YUV422TORGB_SETUP_REG \
+ MEMACCESS([kUVToRB]) \
+ "vld1.8 {d24}, [%[kUVToRB]] \n" \
+ MEMACCESS([kUVToG]) \
+ "vld1.8 {d25}, [%[kUVToG]] \n" \
+ MEMACCESS([kUVBiasBGR]) \
+ "vld1.16 {d26[], d27[]}, [%[kUVBiasBGR]]! \n" \
+ MEMACCESS([kUVBiasBGR]) \
+ "vld1.16 {d8[], d9[]}, [%[kUVBiasBGR]]! \n" \
+ MEMACCESS([kUVBiasBGR]) \
+ "vld1.16 {d28[], d29[]}, [%[kUVBiasBGR]] \n" \
+ MEMACCESS([kYToRgb]) \
+ "vld1.32 {d30[], d31[]}, [%[kYToRgb]] \n"
+
+#define YUV422TORGB \
+ "vmull.u8 q8, d2, d24 \n" /* u/v B/R component */\
+ "vmull.u8 q9, d2, d25 \n" /* u/v G component */\
+ "vmovl.u8 q0, d0 \n" /* Y */\
+ "vmovl.s16 q10, d1 \n" \
+ "vmovl.s16 q0, d0 \n" \
+ "vmul.s32 q10, q10, q15 \n" \
+ "vmul.s32 q0, q0, q15 \n" \
+ "vqshrun.s32 d0, q0, #16 \n" \
+ "vqshrun.s32 d1, q10, #16 \n" /* Y */\
+ "vadd.s16 d18, d19 \n" \
+ "vshll.u16 q1, d16, #16 \n" /* Replicate u * UB */\
+ "vshll.u16 q10, d17, #16 \n" /* Replicate v * VR */\
+ "vshll.u16 q3, d18, #16 \n" /* Replicate (v*VG + u*UG)*/\
+ "vaddw.u16 q1, q1, d16 \n" \
+ "vaddw.u16 q10, q10, d17 \n" \
+ "vaddw.u16 q3, q3, d18 \n" \
+ "vqadd.s16 q8, q0, q13 \n" /* B */ \
+ "vqadd.s16 q9, q0, q14 \n" /* R */ \
+ "vqadd.s16 q0, q0, q4 \n" /* G */ \
+ "vqadd.s16 q8, q8, q1 \n" /* B */ \
+ "vqadd.s16 q9, q9, q10 \n" /* R */ \
+ "vqsub.s16 q0, q0, q3 \n" /* G */ \
+ "vqshrun.s16 d20, q8, #6 \n" /* B */ \
+ "vqshrun.s16 d22, q9, #6 \n" /* R */ \
+ "vqshrun.s16 d21, q0, #6 \n" /* G */
+
+// YUV to RGB conversion constants.
+// Y contribution to R,G,B. Scale and bias.
+#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */
+#define YGB 1160 /* 1.164 * 64 * 16 - adjusted for even error distribution */
+
+// U and V contributions to R,G,B.
+#define UB -128 /* -min(128, round(2.018 * 64)) */
+#define UG 25 /* -round(-0.391 * 64) */
+#define VG 52 /* -round(-0.813 * 64) */
+#define VR -102 /* -round(1.596 * 64) */
+
+// Bias values to subtract 16 from Y and 128 from U and V.
+#define BB (UB * 128 - YGB)
+#define BG (UG * 128 + VG * 128 - YGB)
+#define BR (VR * 128 - YGB)
+
+static uvec8 kUVToRB = { 128, 128, 128, 128, 102, 102, 102, 102,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+static uvec8 kUVToG = { 25, 25, 25, 25, 52, 52, 52, 52,
+ 0, 0, 0, 0, 0, 0, 0, 0 };
+static vec16 kUVBiasBGR = { BB, BG, BR, 0, 0, 0, 0, 0 };
+static vec32 kYToRgb = { 0x0101 * YG, 0, 0, 0 };
+
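+// A scalar sketch of the math the NEON path below implements with these
+// constants, modelled on libyuv's C fallback. Illustration only (hence
+// the #if 0); Clamp() saturating to [0, 255] is assumed:
+#if 0
+static __inline void YuvPixelSketch(uint8 y, uint8 u, uint8 v,
+                                    uint8* b, uint8* g, uint8* r) {
+  uint32 y1 = (uint32)(y * 0x0101 * YG) >> 16;  // scale and bias Y
+  *b = Clamp((int32)(-(u * UB) + y1 + BB) >> 6);
+  *g = Clamp((int32)(-(u * UG + v * VG) + y1 + BG) >> 6);
+  *r = Clamp((int32)(-(v * VR) + y1 + BR) >> 6);
+}
+#endif
+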
+#undef YG
+#undef YGB
+#undef UB
+#undef UG
+#undef VG
+#undef VR
+#undef BB
+#undef BG
+#undef BR
+
+void I444ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV444
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(3)
+ "vst4.8 {d20, d21, d22, d23}, [%3]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void I422ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(3)
+ "vst4.8 {d20, d21, d22, d23}, [%3]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void I411ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV411
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(3)
+ "vst4.8 {d20, d21, d22, d23}, [%3]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void I422ToBGRARow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_bgra,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vswp.u8 d20, d22 \n"
+ "vmov.u8 d19, #255 \n"
+ MEMACCESS(3)
+ "vst4.8 {d19, d20, d21, d22}, [%3]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_bgra), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void I422ToABGRRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_abgr,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vswp.u8 d20, d22 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(3)
+ "vst4.8 {d20, d21, d22, d23}, [%3]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_abgr), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void I422ToRGBARow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgba,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vmov.u8 d19, #255 \n"
+ MEMACCESS(3)
+ "vst4.8 {d19, d20, d21, d22}, [%3]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_rgba), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void I422ToRGB24Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb24,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ MEMACCESS(3)
+ "vst3.8 {d20, d21, d22}, [%3]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_rgb24), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void I422ToRAWRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_raw,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vswp.u8 d20, d22 \n"
+ MEMACCESS(3)
+ "vst3.8 {d20, d21, d22}, [%3]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_raw), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+#define ARGBTORGB565 \
+ "vshr.u8 d20, d20, #3 \n" /* B */ \
+ "vshr.u8 d21, d21, #2 \n" /* G */ \
+ "vshr.u8 d22, d22, #3 \n" /* R */ \
+ "vmovl.u8 q8, d20 \n" /* B */ \
+ "vmovl.u8 q9, d21 \n" /* G */ \
+ "vmovl.u8 q10, d22 \n" /* R */ \
+ "vshl.u16 q9, q9, #5 \n" /* G */ \
+ "vshl.u16 q10, q10, #11 \n" /* R */ \
+ "vorr q0, q8, q9 \n" /* BG */ \
+ "vorr q0, q0, q10 \n" /* BGR */
+
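+// Scalar equivalent of the packing above (for reference):
+//   rgb565 = ((r >> 3) << 11) | ((g >> 2) << 5) | (b >> 3);
+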
+void I422ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb565,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ ARGBTORGB565
+ MEMACCESS(3)
+ "vst1.8 {q0}, [%3]! \n" // store 8 pixels RGB565.
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_rgb565), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+#define ARGBTOARGB1555 \
+ "vshr.u8 q10, q10, #3 \n" /* B */ \
+ "vshr.u8 d22, d22, #3 \n" /* R */ \
+ "vshr.u8 d23, d23, #7 \n" /* A */ \
+ "vmovl.u8 q8, d20 \n" /* B */ \
+ "vmovl.u8 q9, d21 \n" /* G */ \
+ "vmovl.u8 q10, d22 \n" /* R */ \
+ "vmovl.u8 q11, d23 \n" /* A */ \
+ "vshl.u16 q9, q9, #5 \n" /* G */ \
+ "vshl.u16 q10, q10, #10 \n" /* R */ \
+ "vshl.u16 q11, q11, #15 \n" /* A */ \
+ "vorr q0, q8, q9 \n" /* BG */ \
+ "vorr q1, q10, q11 \n" /* RA */ \
+ "vorr q0, q0, q1 \n" /* BGRA */
+
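+// Scalar equivalent of the packing above (for reference):
+//   argb1555 = ((a >> 7) << 15) | ((r >> 3) << 10) |
+//              ((g >> 3) << 5) | (b >> 3);
+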
+void I422ToARGB1555Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb1555,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ ARGBTOARGB1555
+ MEMACCESS(3)
+ "vst1.8 {q0}, [%3]! \n" // store 8 pixels ARGB1555.
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb1555), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+#define ARGBTOARGB4444 \
+ "vshr.u8 d20, d20, #4 \n" /* B */ \
+ "vbic.32 d21, d21, d4 \n" /* G */ \
+ "vshr.u8 d22, d22, #4 \n" /* R */ \
+ "vbic.32 d23, d23, d4 \n" /* A */ \
+ "vorr d0, d20, d21 \n" /* BG */ \
+ "vorr d1, d22, d23 \n" /* RA */ \
+ "vzip.u8 d0, d1 \n" /* BGRA */
+
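+// Scalar equivalent of the nibble packing above (for reference; d4
+// holds 0x0f, so vbic keeps only the high nibble):
+//   argb4444 = ((a >> 4) << 12) | ((r >> 4) << 8) |
+//              ((g >> 4) << 4) | (b >> 4);
+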
+void I422ToARGB4444Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb4444,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "vmov.u8 d4, #0x0f \n" // bits to clear with vbic.
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV422
+ YUV422TORGB
+ "subs %4, %4, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ ARGBTOARGB4444
+ MEMACCESS(3)
+ "vst1.8 {q0}, [%3]! \n" // store 8 pixels ARGB4444.
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb4444), // %3
+ "+r"(width) // %4
+ : [kUVToRB]"r"(&kUVToRB), // %5
+ [kUVToG]"r"(&kUVToG), // %6
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void I400ToARGBRow_NEON(const uint8* src_y,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUV400
+ YUV422TORGB
+ "subs %2, %2, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(1)
+ "vst4.8 {d20, d21, d22, d23}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : [kUVToRB]"r"(&kUVToRB), // %3
+ [kUVToG]"r"(&kUVToG), // %4
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void J400ToARGBRow_NEON(const uint8* src_y,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "vmov.u8 d23, #255 \n"
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {d20}, [%0]! \n"
+ "vmov d21, d20 \n"
+ "vmov d22, d20 \n"
+ "subs %2, %2, #8 \n"
+ MEMACCESS(1)
+ "vst4.8 {d20, d21, d22, d23}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ :
+ : "cc", "memory", "d20", "d21", "d22", "d23"
+ );
+}
+
+void NV12ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READNV12
+ YUV422TORGB
+ "subs %3, %3, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(2)
+ "vst4.8 {d20, d21, d22, d23}, [%2]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_uv), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ : [kUVToRB]"r"(&kUVToRB), // %4
+ [kUVToG]"r"(&kUVToG), // %5
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void NV21ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READNV21
+ YUV422TORGB
+ "subs %3, %3, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(2)
+ "vst4.8 {d20, d21, d22, d23}, [%2]! \n"
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_uv), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ : [kUVToRB]"r"(&kUVToRB), // %4
+ [kUVToG]"r"(&kUVToG), // %5
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void NV12ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_rgb565,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READNV12
+ YUV422TORGB
+ "subs %3, %3, #8 \n"
+ ARGBTORGB565
+ MEMACCESS(2)
+ "vst1.8 {q0}, [%2]! \n" // store 8 pixels RGB565.
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_uv), // %1
+ "+r"(dst_rgb565), // %2
+ "+r"(width) // %3
+ : [kUVToRB]"r"(&kUVToRB), // %4
+ [kUVToG]"r"(&kUVToG), // %5
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void NV21ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_rgb565,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READNV21
+ YUV422TORGB
+ "subs %3, %3, #8 \n"
+ ARGBTORGB565
+ MEMACCESS(2)
+ "vst1.8 {q0}, [%2]! \n" // store 8 pixels RGB565.
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_uv), // %1
+ "+r"(dst_rgb565), // %2
+ "+r"(width) // %3
+ : [kUVToRB]"r"(&kUVToRB), // %4
+ [kUVToG]"r"(&kUVToG), // %5
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void YUY2ToARGBRow_NEON(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READYUY2
+ YUV422TORGB
+ "subs %2, %2, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(1)
+ "vst4.8 {d20, d21, d22, d23}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : [kUVToRB]"r"(&kUVToRB), // %3
+ [kUVToG]"r"(&kUVToG), // %4
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void UYVYToARGBRow_NEON(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ ".p2align 2 \n"
+ "1: \n"
+ READUYVY
+ YUV422TORGB
+ "subs %2, %2, #8 \n"
+ "vmov.u8 d23, #255 \n"
+ MEMACCESS(1)
+ "vst4.8 {d20, d21, d22, d23}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : [kUVToRB]"r"(&kUVToRB), // %3
+ [kUVToG]"r"(&kUVToG), // %4
+ [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+// Reads 16 pairs of UV and writes the even values to dst_u, the odd to dst_v.
+void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld2.8 {q0, q1}, [%0]! \n" // load 16 pairs of UV
+ "subs %3, %3, #16 \n" // 16 processed per loop
+ MEMACCESS(1)
+ "vst1.8 {q0}, [%1]! \n" // store U
+ MEMACCESS(2)
+ "vst1.8 {q1}, [%2]! \n" // store V
+ "bgt 1b \n"
+ : "+r"(src_uv), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(width) // %3 // Output registers
+ : // Input registers
+ : "cc", "memory", "q0", "q1" // Clobber List
+ );
+}
+
+// Reads 16 U's and V's and writes out 16 pairs of UV.
+void MergeUVRow_NEON(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load U
+ MEMACCESS(1)
+ "vld1.8 {q1}, [%1]! \n" // load V
+ "subs %3, %3, #16 \n" // 16 processed per loop
+ MEMACCESS(2)
+ "vst2.u8 {q0, q1}, [%2]! \n" // store 16 pairs of UV
+ "bgt 1b \n"
+  : "+r"(src_u),                  // %0
+ "+r"(src_v), // %1
+ "+r"(dst_uv), // %2
+ "+r"(width) // %3 // Output registers
+ : // Input registers
+ : "cc", "memory", "q0", "q1" // Clobber List
+ );
+}
+
+// Copy multiples of 32 bytes. vld1.8 of four registers allows unaligned
+// access and is fastest on Cortex-A15.
+void CopyRow_NEON(const uint8* src, uint8* dst, int count) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {d0, d1, d2, d3}, [%0]! \n" // load 32
+ "subs %2, %2, #32 \n" // 32 processed per loop
+ MEMACCESS(1)
+ "vst1.8 {d0, d1, d2, d3}, [%1]! \n" // store 32
+ "bgt 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(count) // %2 // Output registers
+ : // Input registers
+ : "cc", "memory", "q0", "q1" // Clobber List
+ );
+}
+
+// SetRow writes 'count' bytes using an 8 bit value repeated.
+void SetRow_NEON(uint8* dst, uint8 v8, int count) {
+ asm volatile (
+ "vdup.8 q0, %2 \n" // duplicate 16 bytes
+ "1: \n"
+ "subs %1, %1, #16 \n" // 16 bytes per loop
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n" // store
+ "bgt 1b \n"
+ : "+r"(dst), // %0
+ "+r"(count) // %1
+ : "r"(v8) // %2
+ : "cc", "memory", "q0"
+ );
+}
+
+// ARGBSetRow writes 'count' pixels using a 32 bit value repeated.
+void ARGBSetRow_NEON(uint8* dst, uint32 v32, int count) {
+ asm volatile (
+ "vdup.u32 q0, %2 \n" // duplicate 4 ints
+ "1: \n"
+ "subs %1, %1, #4 \n" // 4 pixels per loop
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n" // store
+ "bgt 1b \n"
+ : "+r"(dst), // %0
+ "+r"(count) // %1
+ : "r"(v32) // %2
+ : "cc", "memory", "q0"
+ );
+}
+
+void MirrorRow_NEON(const uint8* src, uint8* dst, int width) {
+ asm volatile (
+ // Start at end of source row.
+ "mov r3, #-16 \n"
+ "add %0, %0, %2 \n"
+ "sub %0, #16 \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0], r3 \n" // src -= 16
+ "subs %2, #16 \n" // 16 pixels per loop.
+ "vrev64.8 q0, q0 \n"
+ MEMACCESS(1)
+ "vst1.8 {d1}, [%1]! \n" // dst += 16
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ :
+ : "cc", "memory", "r3", "q0"
+ );
+}
+
+void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width) {
+ asm volatile (
+ // Start at end of source row.
+ "mov r12, #-16 \n"
+ "add %0, %0, %3, lsl #1 \n"
+ "sub %0, #16 \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld2.8 {d0, d1}, [%0], r12 \n" // src -= 16
+ "subs %3, #8 \n" // 8 pixels per loop.
+ "vrev64.8 q0, q0 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // dst += 8
+ MEMACCESS(2)
+ "vst1.8 {d1}, [%2]! \n"
+ "bgt 1b \n"
+ : "+r"(src_uv), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "r12", "q0"
+ );
+}
+
+void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width) {
+ asm volatile (
+ // Start at end of source row.
+ "mov r3, #-16 \n"
+ "add %0, %0, %2, lsl #2 \n"
+ "sub %0, #16 \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0], r3 \n" // src -= 16
+ "subs %2, #4 \n" // 4 pixels per loop.
+ "vrev64.32 q0, q0 \n"
+ MEMACCESS(1)
+ "vst1.8 {d1}, [%1]! \n" // dst += 16
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width) // %2
+ :
+ : "cc", "memory", "r3", "q0"
+ );
+}
+
+void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix) {
+ asm volatile (
+ "vmov.u8 d4, #255 \n" // Alpha
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld3.8 {d1, d2, d3}, [%0]! \n" // load 8 pixels of RGB24.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ MEMACCESS(1)
+ "vst4.8 {d1, d2, d3, d4}, [%1]! \n" // store 8 pixels of ARGB.
+ "bgt 1b \n"
+ : "+r"(src_rgb24), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List
+ );
+}
+
+void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix) {
+ asm volatile (
+ "vmov.u8 d4, #255 \n" // Alpha
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld3.8 {d1, d2, d3}, [%0]! \n" // load 8 pixels of RAW.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vswp.u8 d1, d3 \n" // swap R, B
+ MEMACCESS(1)
+ "vst4.8 {d1, d2, d3, d4}, [%1]! \n" // store 8 pixels of ARGB.
+ "bgt 1b \n"
+ : "+r"(src_raw), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List
+ );
+}
+
+#define RGB565TOARGB \
+ "vshrn.u16 d6, q0, #5 \n" /* G xxGGGGGG */ \
+ "vuzp.u8 d0, d1 \n" /* d0 xxxBBBBB RRRRRxxx */ \
+ "vshl.u8 d6, d6, #2 \n" /* G GGGGGG00 upper 6 */ \
+ "vshr.u8 d1, d1, #3 \n" /* R 000RRRRR lower 5 */ \
+ "vshl.u8 q0, q0, #3 \n" /* B,R BBBBB000 upper 5 */ \
+ "vshr.u8 q2, q0, #5 \n" /* B,R 00000BBB lower 3 */ \
+ "vorr.u8 d0, d0, d4 \n" /* B */ \
+ "vshr.u8 d4, d6, #6 \n" /* G 000000GG lower 2 */ \
+ "vorr.u8 d2, d1, d5 \n" /* R */ \
+ "vorr.u8 d1, d4, d6 \n" /* G */
+
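+// The shift pairs above replicate the top bits of each channel into the
+// low bits so the full 0..255 range is reached. Scalar sketch:
+//   b8 = (b5 << 3) | (b5 >> 2);
+//   g8 = (g6 << 2) | (g6 >> 4);
+//   r8 = (r5 << 3) | (r5 >> 2);
+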
+void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix) {
+ asm volatile (
+ "vmov.u8 d3, #255 \n" // Alpha
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 RGB565 pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ RGB565TOARGB
+ MEMACCESS(1)
+ "vst4.8 {d0, d1, d2, d3}, [%1]! \n" // store 8 pixels of ARGB.
+ "bgt 1b \n"
+ : "+r"(src_rgb565), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3" // Clobber List
+ );
+}
+
+#define ARGB1555TOARGB \
+ "vshrn.u16 d7, q0, #8 \n" /* A Arrrrrxx */ \
+ "vshr.u8 d6, d7, #2 \n" /* R xxxRRRRR */ \
+ "vshrn.u16 d5, q0, #5 \n" /* G xxxGGGGG */ \
+ "vmovn.u16 d4, q0 \n" /* B xxxBBBBB */ \
+ "vshr.u8 d7, d7, #7 \n" /* A 0000000A */ \
+ "vneg.s8 d7, d7 \n" /* A AAAAAAAA upper 8 */ \
+ "vshl.u8 d6, d6, #3 \n" /* R RRRRR000 upper 5 */ \
+ "vshr.u8 q1, q3, #5 \n" /* R,A 00000RRR lower 3 */ \
+ "vshl.u8 q0, q2, #3 \n" /* B,G BBBBB000 upper 5 */ \
+ "vshr.u8 q2, q0, #5 \n" /* B,G 00000BBB lower 3 */ \
+ "vorr.u8 q1, q1, q3 \n" /* R,A */ \
+    "vorr.u8    q0, q0, q2                 \n"  /* B,G */
+
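+// Note the alpha trick above: after isolating the A bit, vneg.s8 turns
+// 0/1 into 0x00/0xff (two's complement -1 is all ones). Scalar sketch:
+//   a8 = (uint8)(-(int)(argb1555 >> 15));  // 0x00 or 0xff
+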
+// RGB555TOARGB is the same as ARGB1555TOARGB but ignores alpha.
+#define RGB555TOARGB \
+ "vshrn.u16 d6, q0, #5 \n" /* G xxxGGGGG */ \
+ "vuzp.u8 d0, d1 \n" /* d0 xxxBBBBB xRRRRRxx */ \
+ "vshl.u8 d6, d6, #3 \n" /* G GGGGG000 upper 5 */ \
+ "vshr.u8 d1, d1, #2 \n" /* R 00xRRRRR lower 5 */ \
+ "vshl.u8 q0, q0, #3 \n" /* B,R BBBBB000 upper 5 */ \
+ "vshr.u8 q2, q0, #5 \n" /* B,R 00000BBB lower 3 */ \
+ "vorr.u8 d0, d0, d4 \n" /* B */ \
+ "vshr.u8 d4, d6, #5 \n" /* G 00000GGG lower 3 */ \
+ "vorr.u8 d2, d1, d5 \n" /* R */ \
+ "vorr.u8 d1, d4, d6 \n" /* G */
+
+void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb,
+ int pix) {
+ asm volatile (
+ "vmov.u8 d3, #255 \n" // Alpha
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 ARGB1555 pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ ARGB1555TOARGB
+ MEMACCESS(1)
+ "vst4.8 {d0, d1, d2, d3}, [%1]! \n" // store 8 pixels of ARGB.
+ "bgt 1b \n"
+ : "+r"(src_argb1555), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3" // Clobber List
+ );
+}
+
+#define ARGB4444TOARGB \
+ "vuzp.u8 d0, d1 \n" /* d0 BG, d1 RA */ \
+ "vshl.u8 q2, q0, #4 \n" /* B,R BBBB0000 */ \
+ "vshr.u8 q1, q0, #4 \n" /* G,A 0000GGGG */ \
+ "vshr.u8 q0, q2, #4 \n" /* B,R 0000BBBB */ \
+ "vorr.u8 q0, q0, q2 \n" /* B,R BBBBBBBB */ \
+ "vshl.u8 q2, q1, #4 \n" /* G,A GGGG0000 */ \
+ "vorr.u8 q1, q1, q2 \n" /* G,A GGGGGGGG */ \
+ "vswp.u8 d1, d2 \n" /* B,R,G,A -> B,G,R,A */
+
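+// Scalar equivalent of the expansion above: each 4 bit channel is
+// replicated into both nibbles of a byte, e.g. b8 = b4 * 0x11 (and
+// likewise for g, r and a).
+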
+void ARGB4444ToARGBRow_NEON(const uint8* src_argb4444, uint8* dst_argb,
+ int pix) {
+ asm volatile (
+ "vmov.u8 d3, #255 \n" // Alpha
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 ARGB4444 pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ ARGB4444TOARGB
+ MEMACCESS(1)
+ "vst4.8 {d0, d1, d2, d3}, [%1]! \n" // store 8 pixels of ARGB.
+ "bgt 1b \n"
+ : "+r"(src_argb4444), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2" // Clobber List
+ );
+}
+
+void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb24, int pix) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d1, d2, d3, d4}, [%0]! \n" // load 8 pixels of ARGB.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ MEMACCESS(1)
+ "vst3.8 {d1, d2, d3}, [%1]! \n" // store 8 pixels of RGB24.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_rgb24), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List
+ );
+}
+
+void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_raw, int pix) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d1, d2, d3, d4}, [%0]! \n" // load 8 pixels of ARGB.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vswp.u8 d1, d3 \n" // swap R, B
+ MEMACCESS(1)
+ "vst3.8 {d1, d2, d3}, [%1]! \n" // store 8 pixels of RAW.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_raw), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d1", "d2", "d3", "d4" // Clobber List
+ );
+}
+
+void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int pix) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld2.8 {q0, q1}, [%0]! \n" // load 16 pixels of YUY2.
+ "subs %2, %2, #16 \n" // 16 processed per loop.
+ MEMACCESS(1)
+ "vst1.8 {q0}, [%1]! \n" // store 16 pixels of Y.
+ "bgt 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1" // Clobber List
+ );
+}
+
+void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int pix) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld2.8 {q0, q1}, [%0]! \n" // load 16 pixels of UYVY.
+ "subs %2, %2, #16 \n" // 16 processed per loop.
+ MEMACCESS(1)
+ "vst1.8 {q1}, [%1]! \n" // store 16 pixels of Y.
+ "bgt 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1" // Clobber List
+ );
+}
+
+void YUY2ToUV422Row_NEON(const uint8* src_yuy2, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 16 pixels of YUY2.
+ "subs %3, %3, #16 \n" // 16 pixels = 8 UVs.
+ MEMACCESS(1)
+ "vst1.8 {d1}, [%1]! \n" // store 8 U.
+ MEMACCESS(2)
+ "vst1.8 {d3}, [%2]! \n" // store 8 V.
+ "bgt 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3" // Clobber List
+ );
+}
+
+void UYVYToUV422Row_NEON(const uint8* src_uyvy, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 16 pixels of UYVY.
+ "subs %3, %3, #16 \n" // 16 pixels = 8 UVs.
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 U.
+ MEMACCESS(2)
+ "vst1.8 {d2}, [%2]! \n" // store 8 V.
+ "bgt 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3" // Clobber List
+ );
+}
+
+void YUY2ToUVRow_NEON(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // stride + src_yuy2
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 16 pixels of YUY2.
+ "subs %4, %4, #16 \n" // 16 pixels = 8 UVs.
+ MEMACCESS(1)
+ "vld4.8 {d4, d5, d6, d7}, [%1]! \n" // load next row YUY2.
+ "vrhadd.u8 d1, d1, d5 \n" // average rows of U
+ "vrhadd.u8 d3, d3, d7 \n" // average rows of V
+ MEMACCESS(2)
+ "vst1.8 {d1}, [%2]! \n" // store 8 U.
+ MEMACCESS(3)
+ "vst1.8 {d3}, [%3]! \n" // store 8 V.
+ "bgt 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(stride_yuy2), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" // Clobber List
+ );
+}
+
+void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // stride + src_uyvy
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 16 pixels of UYVY.
+ "subs %4, %4, #16 \n" // 16 pixels = 8 UVs.
+ MEMACCESS(1)
+ "vld4.8 {d4, d5, d6, d7}, [%1]! \n" // load next row UYVY.
+ "vrhadd.u8 d0, d0, d4 \n" // average rows of U
+ "vrhadd.u8 d2, d2, d6 \n" // average rows of V
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 U.
+ MEMACCESS(3)
+ "vst1.8 {d2}, [%3]! \n" // store 8 V.
+ "bgt 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(stride_uyvy), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7" // Clobber List
+ );
+}
+
+// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
+void ARGBShuffleRow_NEON(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ asm volatile (
+ MEMACCESS(3)
+ "vld1.8 {q2}, [%3] \n" // shuffler
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 4 pixels.
+ "subs %2, %2, #4 \n" // 4 processed per loop
+ "vtbl.8 d2, {d0, d1}, d4 \n" // look up 2 first pixels
+ "vtbl.8 d3, {d0, d1}, d5 \n" // look up 2 next pixels
+ MEMACCESS(1)
+ "vst1.8 {q1}, [%1]! \n" // store 4.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ : "r"(shuffler) // %3
+ : "cc", "memory", "q0", "q1", "q2" // Clobber List
+ );
+}
+
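+// Example usage (illustrative): BGRA -> ARGB uses a byte-index table
+// such as libyuv's kShuffleMaskBGRAToARGB:
+//   static uvec8 kShuffleMaskBGRAToARGB =
+//     { 3u, 2u, 1u, 0u, 7u, 6u, 5u, 4u, 11u, 10u, 9u, 8u, 15u, 14u, 13u, 12u };
+//   ARGBShuffleRow_NEON(src_bgra, dst_argb,
+//                       (const uint8*)(&kShuffleMaskBGRAToARGB), pix);
+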
+void I422ToYUY2Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_yuy2, int width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld2.8 {d0, d2}, [%0]! \n" // load 16 Ys
+ MEMACCESS(1)
+ "vld1.8 {d1}, [%1]! \n" // load 8 Us
+ MEMACCESS(2)
+ "vld1.8 {d3}, [%2]! \n" // load 8 Vs
+ "subs %4, %4, #16 \n" // 16 pixels
+ MEMACCESS(3)
+ "vst4.8 {d0, d1, d2, d3}, [%3]! \n" // Store 8 YUY2/16 pixels.
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_yuy2), // %3
+ "+r"(width) // %4
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3"
+ );
+}
+
+void I422ToUYVYRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_uyvy, int width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld2.8 {d1, d3}, [%0]! \n" // load 16 Ys
+ MEMACCESS(1)
+ "vld1.8 {d0}, [%1]! \n" // load 8 Us
+ MEMACCESS(2)
+ "vld1.8 {d2}, [%2]! \n" // load 8 Vs
+ "subs %4, %4, #16 \n" // 16 pixels
+ MEMACCESS(3)
+ "vst4.8 {d0, d1, d2, d3}, [%3]! \n" // Store 8 UYVY/16 pixels.
+ "bgt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_uyvy), // %3
+ "+r"(width) // %4
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3"
+ );
+}
+
+void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int pix) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d20, d21, d22, d23}, [%0]! \n" // load 8 pixels of ARGB.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ ARGBTORGB565
+ MEMACCESS(1)
+ "vst1.8 {q0}, [%1]! \n" // store 8 pixels RGB565.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_rgb565), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q8", "q9", "q10", "q11"
+ );
+}
+
+void ARGBToRGB565DitherRow_NEON(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "vdup.32 d2, %2 \n" // dither4
+ "1: \n"
+ MEMACCESS(1)
+ "vld4.8 {d20, d21, d22, d23}, [%1]! \n" // load 8 pixels of ARGB.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vqadd.u8 d20, d20, d2 \n"
+ "vqadd.u8 d21, d21, d2 \n"
+ "vqadd.u8 d22, d22, d2 \n"
+ ARGBTORGB565
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n" // store 8 pixels RGB565.
+ "bgt 1b \n"
+ : "+r"(dst_rgb) // %0
+ : "r"(src_argb), // %1
+ "r"(dither4), // %2
+ "r"(width) // %3
+ : "cc", "memory", "q0", "q1", "q8", "q9", "q10", "q11"
+ );
+}
+
+void ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_argb1555,
+ int pix) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d20, d21, d22, d23}, [%0]! \n" // load 8 pixels of ARGB.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ ARGBTOARGB1555
+ MEMACCESS(1)
+ "vst1.8 {q0}, [%1]! \n" // store 8 pixels ARGB1555.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb1555), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q8", "q9", "q10", "q11"
+ );
+}
+
+void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_argb4444,
+ int pix) {
+ asm volatile (
+ "vmov.u8 d4, #0x0f \n" // bits to clear with vbic.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d20, d21, d22, d23}, [%0]! \n" // load 8 pixels of ARGB.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ ARGBTOARGB4444
+ MEMACCESS(1)
+ "vst1.8 {q0}, [%1]! \n" // store 8 pixels ARGB4444.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb4444), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q8", "q9", "q10", "q11"
+ );
+}
+
+void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d24, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d25, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d26, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d27, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q2, d0, d24 \n" // B
+ "vmlal.u8 q2, d1, d25 \n" // G
+ "vmlal.u8 q2, d2, d26 \n" // R
+ "vqrshrun.s16 d0, q2, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d27 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q12", "q13"
+ );
+}
+
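+// Scalar sketch of the luma computation above (saturating adds omitted;
+// vqrshrun rounds before shifting):
+//   y = ((13 * b + 65 * g + 33 * r + 64) >> 7) + 16;
+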
+void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d24, #15 \n" // B * 0.11400 coefficient
+ "vmov.u8 d25, #75 \n" // G * 0.58700 coefficient
+ "vmov.u8 d26, #38 \n" // R * 0.29900 coefficient
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q2, d0, d24 \n" // B
+ "vmlal.u8 q2, d1, d25 \n" // G
+ "vmlal.u8 q2, d2, d26 \n" // R
+ "vqrshrun.s16 d0, q2, #7 \n" // 15 bit to 8 bit Y
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q12", "q13"
+ );
+}
+
+// 8x1 pixels -> 8x1 U, V: full-resolution chroma, no subsampling.
+void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ "vmov.u8 d24, #112 \n" // UB / VR 0.875 coefficient
+ "vmov.u8 d25, #74 \n" // UG -0.5781 coefficient
+ "vmov.u8 d26, #38 \n" // UR -0.2969 coefficient
+ "vmov.u8 d27, #18 \n" // VB -0.1406 coefficient
+ "vmov.u8 d28, #94 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vmull.u8 q2, d0, d24 \n" // B
+ "vmlsl.u8 q2, d1, d25 \n" // G
+ "vmlsl.u8 q2, d2, d26 \n" // R
+ "vadd.u16 q2, q2, q15 \n" // +128 -> unsigned
+
+ "vmull.u8 q3, d2, d24 \n" // R
+ "vmlsl.u8 q3, d1, d28 \n" // G
+ "vmlsl.u8 q3, d0, d27 \n" // B
+ "vadd.u16 q3, q3, q15 \n" // +128 -> unsigned
+
+ "vqshrn.u16 d0, q2, #8 \n" // 16 bit to 8 bit U
+ "vqshrn.u16 d1, q3, #8 \n" // 16 bit to 8 bit V
+
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels U.
+ MEMACCESS(2)
+ "vst1.8 {d1}, [%2]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q12", "q13", "q14", "q15"
+ );
+}
+
+// 16x1 pixels -> 8x1. pix is number of argb pixels. e.g. 16.
+void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels.
+
+ "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
+
+ "subs %3, %3, #16 \n" // 16 processed per loop.
+ "vmul.s16 q8, q0, q10 \n" // B
+ "vmls.s16 q8, q1, q11 \n" // G
+ "vmls.s16 q8, q2, q12 \n" // R
+ "vadd.u16 q8, q8, q15 \n" // +128 -> unsigned
+
+ "vmul.s16 q9, q2, q10 \n" // R
+ "vmls.s16 q9, q1, q14 \n" // G
+ "vmls.s16 q9, q0, q13 \n" // B
+ "vadd.u16 q9, q9, q15 \n" // +128 -> unsigned
+
+ "vqshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit U
+ "vqshrn.u16 d1, q9, #8 \n" // 16 bit to 8 bit V
+
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels U.
+ MEMACCESS(2)
+ "vst1.8 {d1}, [%2]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+// 32x1 pixels -> 8x1. pix is number of argb pixels. e.g. 32.
+void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels.
+ "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(0)
+ "vld4.8 {d8, d10, d12, d14}, [%0]! \n" // load 8 more ARGB pixels.
+ MEMACCESS(0)
+ "vld4.8 {d9, d11, d13, d15}, [%0]! \n" // load last 8 ARGB pixels.
+ "vpaddl.u8 q4, q4 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q5, q5 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q6, q6 \n" // R 16 bytes -> 8 shorts.
+
+ "vpadd.u16 d0, d0, d1 \n" // B 16 shorts -> 8 shorts.
+ "vpadd.u16 d1, d8, d9 \n" // B
+ "vpadd.u16 d2, d2, d3 \n" // G 16 shorts -> 8 shorts.
+ "vpadd.u16 d3, d10, d11 \n" // G
+ "vpadd.u16 d4, d4, d5 \n" // R 16 shorts -> 8 shorts.
+ "vpadd.u16 d5, d12, d13 \n" // R
+
+ "vrshr.u16 q0, q0, #1 \n" // 2x average
+ "vrshr.u16 q1, q1, #1 \n"
+ "vrshr.u16 q2, q2, #1 \n"
+
+ "subs %3, %3, #32 \n" // 32 processed per loop.
+ "vmul.s16 q8, q0, q10 \n" // B
+ "vmls.s16 q8, q1, q11 \n" // G
+ "vmls.s16 q8, q2, q12 \n" // R
+ "vadd.u16 q8, q8, q15 \n" // +128 -> unsigned
+ "vmul.s16 q9, q2, q10 \n" // R
+ "vmls.s16 q9, q1, q14 \n" // G
+ "vmls.s16 q9, q0, q13 \n" // B
+ "vadd.u16 q9, q9, q15 \n" // +128 -> unsigned
+ "vqshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit U
+ "vqshrn.u16 d1, q9, #8 \n" // 16 bit to 8 bit V
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels U.
+ MEMACCESS(2)
+ "vst1.8 {d1}, [%2]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16.
+#define RGBTOUV(QB, QG, QR) \
+ "vmul.s16 q8, " #QB ", q10 \n" /* B */ \
+ "vmls.s16 q8, " #QG ", q11 \n" /* G */ \
+ "vmls.s16 q8, " #QR ", q12 \n" /* R */ \
+ "vadd.u16 q8, q8, q15 \n" /* +128 -> unsigned */ \
+ "vmul.s16 q9, " #QR ", q10 \n" /* R */ \
+ "vmls.s16 q9, " #QG ", q14 \n" /* G */ \
+ "vmls.s16 q9, " #QB ", q13 \n" /* B */ \
+ "vadd.u16 q9, q9, q15 \n" /* +128 -> unsigned */ \
+ "vqshrn.u16 d0, q8, #8 \n" /* 16 bit to 8 bit U */ \
+ "vqshrn.u16 d1, q9, #8 \n" /* 16 bit to 8 bit V */
+
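+// Scalar sketch of RGBTOUV, where QB/QG/QR hold 2x2-averaged channel
+// values scaled by 2, hence the halved coefficients loaded by the
+// callers (saturation omitted; 0x8080 is the 128.5 * 256 bias):
+//   u = (56 * b - 37 * g - 19 * r + 0x8080) >> 8;
+//   v = (56 * r - 47 * g - 9 * b + 0x8080) >> 8;
+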
+// TODO(fbarchard): Consider vhadd vertical, then vpaddl horizontal, avoid shr.
+void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_argb
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels.
+ "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "vld4.8 {d8, d10, d12, d14}, [%1]! \n" // load 8 more ARGB pixels.
+ MEMACCESS(1)
+ "vld4.8 {d9, d11, d13, d15}, [%1]! \n" // load last 8 ARGB pixels.
+ "vpadal.u8 q0, q4 \n" // B 16 bytes -> 8 shorts.
+ "vpadal.u8 q1, q5 \n" // G 16 bytes -> 8 shorts.
+ "vpadal.u8 q2, q6 \n" // R 16 bytes -> 8 shorts.
+
+ "vrshr.u16 q0, q0, #1 \n" // 2x average
+ "vrshr.u16 q1, q1, #1 \n"
+ "vrshr.u16 q2, q2, #1 \n"
+
+    "subs       %4, %4, #16                    \n"  // 16 pixels per loop (2 rows).
+ RGBTOUV(q0, q1, q2)
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_stride_argb), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+// TODO(fbarchard): Subsample match C code.
+void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_argb
+ "vmov.s16 q10, #127 / 2 \n" // UB / VR 0.500 coefficient
+ "vmov.s16 q11, #84 / 2 \n" // UG -0.33126 coefficient
+ "vmov.s16 q12, #43 / 2 \n" // UR -0.16874 coefficient
+ "vmov.s16 q13, #20 / 2 \n" // VB -0.08131 coefficient
+ "vmov.s16 q14, #107 / 2 \n" // VG -0.41869 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels.
+ "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "vld4.8 {d8, d10, d12, d14}, [%1]! \n" // load 8 more ARGB pixels.
+ MEMACCESS(1)
+ "vld4.8 {d9, d11, d13, d15}, [%1]! \n" // load last 8 ARGB pixels.
+ "vpadal.u8 q0, q4 \n" // B 16 bytes -> 8 shorts.
+ "vpadal.u8 q1, q5 \n" // G 16 bytes -> 8 shorts.
+ "vpadal.u8 q2, q6 \n" // R 16 bytes -> 8 shorts.
+
+ "vrshr.u16 q0, q0, #1 \n" // 2x average
+ "vrshr.u16 q1, q1, #1 \n"
+ "vrshr.u16 q2, q2, #1 \n"
+
+    "subs       %4, %4, #16                    \n"  // 16 pixels per loop (2 rows).
+ RGBTOUV(q0, q1, q2)
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_stride_argb), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_bgra
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 BGRA pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 BGRA pixels.
+ "vpaddl.u8 q3, q3 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q2 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "vld4.8 {d8, d10, d12, d14}, [%1]! \n" // load 8 more BGRA pixels.
+ MEMACCESS(1)
+ "vld4.8 {d9, d11, d13, d15}, [%1]! \n" // load last 8 BGRA pixels.
+ "vpadal.u8 q3, q7 \n" // B 16 bytes -> 8 shorts.
+ "vpadal.u8 q2, q6 \n" // G 16 bytes -> 8 shorts.
+ "vpadal.u8 q1, q5 \n" // R 16 bytes -> 8 shorts.
+
+ "vrshr.u16 q1, q1, #1 \n" // 2x average
+ "vrshr.u16 q2, q2, #1 \n"
+ "vrshr.u16 q3, q3, #1 \n"
+
+    "subs       %4, %4, #16                    \n"  // 16 pixels per loop (2 rows).
+ RGBTOUV(q3, q2, q1)
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_bgra), // %0
+ "+r"(src_stride_bgra), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_abgr
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ABGR pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ABGR pixels.
+ "vpaddl.u8 q2, q2 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q0, q0 \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "vld4.8 {d8, d10, d12, d14}, [%1]! \n" // load 8 more ABGR pixels.
+ MEMACCESS(1)
+ "vld4.8 {d9, d11, d13, d15}, [%1]! \n" // load last 8 ABGR pixels.
+ "vpadal.u8 q2, q6 \n" // B 16 bytes -> 8 shorts.
+ "vpadal.u8 q1, q5 \n" // G 16 bytes -> 8 shorts.
+ "vpadal.u8 q0, q4 \n" // R 16 bytes -> 8 shorts.
+
+ "vrshr.u16 q0, q0, #1 \n" // 2x average
+ "vrshr.u16 q1, q1, #1 \n"
+ "vrshr.u16 q2, q2, #1 \n"
+
+    "subs       %4, %4, #16                    \n"  // 16 pixels per loop (2 rows).
+ RGBTOUV(q2, q1, q0)
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_abgr), // %0
+ "+r"(src_stride_abgr), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_rgba
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 RGBA pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 RGBA pixels.
+ "vpaddl.u8 q0, q1 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q2 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q3 \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "vld4.8 {d8, d10, d12, d14}, [%1]! \n" // load 8 more RGBA pixels.
+ MEMACCESS(1)
+ "vld4.8 {d9, d11, d13, d15}, [%1]! \n" // load last 8 RGBA pixels.
+ "vpadal.u8 q0, q5 \n" // B 16 bytes -> 8 shorts.
+ "vpadal.u8 q1, q6 \n" // G 16 bytes -> 8 shorts.
+ "vpadal.u8 q2, q7 \n" // R 16 bytes -> 8 shorts.
+
+ "vrshr.u16 q0, q0, #1 \n" // 2x average
+ "vrshr.u16 q1, q1, #1 \n"
+ "vrshr.u16 q2, q2, #1 \n"
+
+ "subs %4, %4, #16 \n" // 32 processed per loop.
+ RGBTOUV(q0, q1, q2)
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_rgba), // %0
+ "+r"(src_stride_rgba), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_rgb24
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld3.8 {d0, d2, d4}, [%0]! \n" // load 8 RGB24 pixels.
+ MEMACCESS(0)
+ "vld3.8 {d1, d3, d5}, [%0]! \n" // load next 8 RGB24 pixels.
+ "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "vld3.8 {d8, d10, d12}, [%1]! \n" // load 8 more RGB24 pixels.
+ MEMACCESS(1)
+ "vld3.8 {d9, d11, d13}, [%1]! \n" // load last 8 RGB24 pixels.
+ "vpadal.u8 q0, q4 \n" // B 16 bytes -> 8 shorts.
+ "vpadal.u8 q1, q5 \n" // G 16 bytes -> 8 shorts.
+ "vpadal.u8 q2, q6 \n" // R 16 bytes -> 8 shorts.
+
+ "vrshr.u16 q0, q0, #1 \n" // 2x average
+ "vrshr.u16 q1, q1, #1 \n"
+ "vrshr.u16 q2, q2, #1 \n"
+
+ "subs %4, %4, #16 \n" // 32 processed per loop.
+ RGBTOUV(q0, q1, q2)
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_rgb24), // %0
+ "+r"(src_stride_rgb24), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_raw
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld3.8 {d0, d2, d4}, [%0]! \n" // load 8 RAW pixels.
+ MEMACCESS(0)
+ "vld3.8 {d1, d3, d5}, [%0]! \n" // load next 8 RAW pixels.
+ "vpaddl.u8 q2, q2 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q0, q0 \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "vld3.8 {d8, d10, d12}, [%1]! \n" // load 8 more RAW pixels.
+ MEMACCESS(1)
+ "vld3.8 {d9, d11, d13}, [%1]! \n" // load last 8 RAW pixels.
+ "vpadal.u8 q2, q6 \n" // B 16 bytes -> 8 shorts.
+ "vpadal.u8 q1, q5 \n" // G 16 bytes -> 8 shorts.
+ "vpadal.u8 q0, q4 \n" // R 16 bytes -> 8 shorts.
+
+ "vrshr.u16 q0, q0, #1 \n" // 2x average
+ "vrshr.u16 q1, q1, #1 \n"
+ "vrshr.u16 q2, q2, #1 \n"
+
+ "subs %4, %4, #16 \n" // 32 processed per loop.
+ RGBTOUV(q2, q1, q0)
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_raw), // %0
+ "+r"(src_stride_raw), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+// 16x2 pixels -> 8x1. pix is number of rgb565 pixels. e.g. 16.
+void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_argb
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 RGB565 pixels.
+ RGB565TOARGB
+ "vpaddl.u8 d8, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpaddl.u8 d10, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpaddl.u8 d12, d2 \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // next 8 RGB565 pixels.
+ RGB565TOARGB
+ "vpaddl.u8 d9, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpaddl.u8 d11, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpaddl.u8 d13, d2 \n" // R 8 bytes -> 4 shorts.
+
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n" // load 8 RGB565 pixels.
+ RGB565TOARGB
+ "vpadal.u8 d8, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpadal.u8 d10, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpadal.u8 d12, d2 \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n" // next 8 RGB565 pixels.
+ RGB565TOARGB
+ "vpadal.u8 d9, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpadal.u8 d11, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpadal.u8 d13, d2 \n" // R 8 bytes -> 4 shorts.
+
+ "vrshr.u16 q4, q4, #1 \n" // 2x average
+ "vrshr.u16 q5, q5, #1 \n"
+ "vrshr.u16 q6, q6, #1 \n"
+
+ "subs %4, %4, #16 \n" // 16 processed per loop.
+ "vmul.s16 q8, q4, q10 \n" // B
+ "vmls.s16 q8, q5, q11 \n" // G
+ "vmls.s16 q8, q6, q12 \n" // R
+ "vadd.u16 q8, q8, q15 \n" // +128 -> unsigned
+ "vmul.s16 q9, q6, q10 \n" // R
+ "vmls.s16 q9, q5, q14 \n" // G
+ "vmls.s16 q9, q4, q13 \n" // B
+ "vadd.u16 q9, q9, q15 \n" // +128 -> unsigned
+ "vqshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit U
+ "vqshrn.u16 d1, q9, #8 \n" // 16 bit to 8 bit V
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_rgb565), // %0
+ "+r"(src_stride_rgb565), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+// 16x2 pixels -> 8x1. pix is number of argb1555 pixels. e.g. 16.
+void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_argb
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 ARGB1555 pixels.
+ RGB555TOARGB
+ "vpaddl.u8 d8, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpaddl.u8 d10, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpaddl.u8 d12, d2 \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // next 8 ARGB1555 pixels.
+ RGB555TOARGB
+ "vpaddl.u8 d9, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpaddl.u8 d11, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpaddl.u8 d13, d2 \n" // R 8 bytes -> 4 shorts.
+
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n" // load 8 ARGB1555 pixels.
+ RGB555TOARGB
+ "vpadal.u8 d8, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpadal.u8 d10, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpadal.u8 d12, d2 \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n" // next 8 ARGB1555 pixels.
+ RGB555TOARGB
+ "vpadal.u8 d9, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpadal.u8 d11, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpadal.u8 d13, d2 \n" // R 8 bytes -> 4 shorts.
+
+ "vrshr.u16 q4, q4, #1 \n" // 2x average
+ "vrshr.u16 q5, q5, #1 \n"
+ "vrshr.u16 q6, q6, #1 \n"
+
+ "subs %4, %4, #16 \n" // 16 processed per loop.
+ "vmul.s16 q8, q4, q10 \n" // B
+ "vmls.s16 q8, q5, q11 \n" // G
+ "vmls.s16 q8, q6, q12 \n" // R
+ "vadd.u16 q8, q8, q15 \n" // +128 -> unsigned
+ "vmul.s16 q9, q6, q10 \n" // R
+ "vmls.s16 q9, q5, q14 \n" // G
+ "vmls.s16 q9, q4, q13 \n" // B
+ "vadd.u16 q9, q9, q15 \n" // +128 -> unsigned
+ "vqshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit U
+ "vqshrn.u16 d1, q9, #8 \n" // 16 bit to 8 bit V
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_argb1555), // %0
+ "+r"(src_stride_argb1555), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+// 16x2 pixels -> 8x1. pix is number of argb4444 pixels. e.g. 16.
+void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ asm volatile (
+ "add %1, %0, %1 \n" // src_stride + src_argb
+ "vmov.s16 q10, #112 / 2 \n" // UB / VR 0.875 coefficient
+ "vmov.s16 q11, #74 / 2 \n" // UG -0.5781 coefficient
+ "vmov.s16 q12, #38 / 2 \n" // UR -0.2969 coefficient
+ "vmov.s16 q13, #18 / 2 \n" // VB -0.1406 coefficient
+ "vmov.s16 q14, #94 / 2 \n" // VG -0.7344 coefficient
+ "vmov.u16 q15, #0x8080 \n" // 128.5
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 ARGB4444 pixels.
+ ARGB4444TOARGB
+ "vpaddl.u8 d8, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpaddl.u8 d10, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpaddl.u8 d12, d2 \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // next 8 ARGB4444 pixels.
+ ARGB4444TOARGB
+ "vpaddl.u8 d9, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpaddl.u8 d11, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpaddl.u8 d13, d2 \n" // R 8 bytes -> 4 shorts.
+
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n" // load 8 ARGB4444 pixels.
+ ARGB4444TOARGB
+ "vpadal.u8 d8, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpadal.u8 d10, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpadal.u8 d12, d2 \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n" // next 8 ARGB4444 pixels.
+ ARGB4444TOARGB
+ "vpadal.u8 d9, d0 \n" // B 8 bytes -> 4 shorts.
+ "vpadal.u8 d11, d1 \n" // G 8 bytes -> 4 shorts.
+ "vpadal.u8 d13, d2 \n" // R 8 bytes -> 4 shorts.
+
+ "vrshr.u16 q4, q4, #1 \n" // 2x average
+ "vrshr.u16 q5, q5, #1 \n"
+ "vrshr.u16 q6, q6, #1 \n"
+
+ "subs %4, %4, #16 \n" // 16 processed per loop.
+ "vmul.s16 q8, q4, q10 \n" // B
+ "vmls.s16 q8, q5, q11 \n" // G
+ "vmls.s16 q8, q6, q12 \n" // R
+ "vadd.u16 q8, q8, q15 \n" // +128 -> unsigned
+ "vmul.s16 q9, q6, q10 \n" // R
+ "vmls.s16 q9, q5, q14 \n" // G
+ "vmls.s16 q9, q4, q13 \n" // B
+ "vadd.u16 q9, q9, q15 \n" // +128 -> unsigned
+ "vqshrn.u16 d0, q8, #8 \n" // 16 bit to 8 bit U
+ "vqshrn.u16 d1, q9, #8 \n" // 16 bit to 8 bit V
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "vst1.8 {d1}, [%3]! \n" // store 8 pixels V.
+ "bgt 1b \n"
+ : "+r"(src_argb4444), // %0
+ "+r"(src_stride_argb4444), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d24, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d25, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d26, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d27, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 RGB565 pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ RGB565TOARGB
+ "vmull.u8 q2, d0, d24 \n" // B
+ "vmlal.u8 q2, d1, d25 \n" // G
+ "vmlal.u8 q2, d2, d26 \n" // R
+ "vqrshrun.s16 d0, q2, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d27 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_rgb565), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q12", "q13"
+ );
+}
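+
+// A scalar sketch (illustration only, not part of libyuv): the Y loops above
+// use coefficients in 1/128 units; vqrshrun #7 rounds and vqadd adds the
+// +16 bias with saturation:
+#if 0
+static __inline uint8 RGBToY_Sketch(int b, int g, int r) {
+  int y = ((13 * b + 65 * g + 33 * r + 64) >> 7) + 16;
+  return (uint8)(y > 255 ? 255 : y);
+}
+#endif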
+
+void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d24, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d25, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d26, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d27, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 ARGB1555 pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ ARGB1555TOARGB
+ "vmull.u8 q2, d0, d24 \n" // B
+ "vmlal.u8 q2, d1, d25 \n" // G
+ "vmlal.u8 q2, d2, d26 \n" // R
+ "vqrshrun.s16 d0, q2, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d27 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_argb1555), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q12", "q13"
+ );
+}
+
+void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d24, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d25, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d26, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d27, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 8 ARGB4444 pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ ARGB4444TOARGB
+ "vmull.u8 q2, d0, d24 \n" // B
+ "vmlal.u8 q2, d1, d25 \n" // G
+ "vmlal.u8 q2, d2, d26 \n" // R
+ "vqrshrun.s16 d0, q2, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d27 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_argb4444), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q12", "q13"
+ );
+}
+
+void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d4, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d6, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d7, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of BGRA.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q8, d1, d4 \n" // R
+ "vmlal.u8 q8, d2, d5 \n" // G
+ "vmlal.u8 q8, d3, d6 \n" // B
+ "vqrshrun.s16 d0, q8, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d7 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_bgra), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"
+ );
+}
+
+void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d4, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d6, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d7, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of ABGR.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q8, d0, d4 \n" // R
+ "vmlal.u8 q8, d1, d5 \n" // G
+ "vmlal.u8 q8, d2, d6 \n" // B
+ "vqrshrun.s16 d0, q8, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d7 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_abgr), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"
+ );
+}
+
+void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d4, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d6, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d7, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of RGBA.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q8, d1, d4 \n" // B
+ "vmlal.u8 q8, d2, d5 \n" // G
+ "vmlal.u8 q8, d3, d6 \n" // R
+ "vqrshrun.s16 d0, q8, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d7 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_rgba), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"
+ );
+}
+
+void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d4, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d6, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d7, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld3.8 {d0, d1, d2}, [%0]! \n" // load 8 pixels of RGB24.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q8, d0, d4 \n" // B
+ "vmlal.u8 q8, d1, d5 \n" // G
+ "vmlal.u8 q8, d2, d6 \n" // R
+ "vqrshrun.s16 d0, q8, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d7 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_rgb24), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"
+ );
+}
+
+void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix) {
+ asm volatile (
+ "vmov.u8 d4, #33 \n" // R * 0.2578 coefficient
+ "vmov.u8 d5, #65 \n" // G * 0.5078 coefficient
+ "vmov.u8 d6, #13 \n" // B * 0.1016 coefficient
+ "vmov.u8 d7, #16 \n" // Add 16 constant
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld3.8 {d0, d1, d2}, [%0]! \n" // load 8 pixels of RAW.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q8, d0, d4 \n" // B
+ "vmlal.u8 q8, d1, d5 \n" // G
+ "vmlal.u8 q8, d2, d6 \n" // R
+ "vqrshrun.s16 d0, q8, #7 \n" // 16 bit to 8 bit Y
+ "vqadd.u8 d0, d7 \n"
+ MEMACCESS(1)
+ "vst1.8 {d0}, [%1]! \n" // store 8 pixels Y.
+ "bgt 1b \n"
+ : "+r"(src_raw), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "d0", "d1", "d2", "d3", "d4", "d5", "d6", "d7", "q8"
+ );
+}
+
+// Bilinear filter 16x2 -> 16x1
+void InterpolateRow_NEON(uint8* dst_ptr,
+ const uint8* src_ptr, ptrdiff_t src_stride,
+ int dst_width, int source_y_fraction) {
+ asm volatile (
+ "cmp %4, #0 \n"
+ "beq 100f \n"
+ "add %2, %1 \n"
+ "cmp %4, #64 \n"
+ "beq 75f \n"
+ "cmp %4, #128 \n"
+ "beq 50f \n"
+ "cmp %4, #192 \n"
+ "beq 25f \n"
+
+ "vdup.8 d5, %4 \n"
+ "rsb %4, #256 \n"
+ "vdup.8 d4, %4 \n"
+ // General purpose row blend.
+ "1: \n"
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
+ "vld1.8 {q1}, [%2]! \n"
+ "subs %3, %3, #16 \n"
+ "vmull.u8 q13, d0, d4 \n"
+ "vmull.u8 q14, d1, d4 \n"
+ "vmlal.u8 q13, d2, d5 \n"
+ "vmlal.u8 q14, d3, d5 \n"
+ "vrshrn.u16 d0, q13, #8 \n"
+ "vrshrn.u16 d1, q14, #8 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 1b \n"
+ "b 99f \n"
+
+ // Blend 25 / 75.
+ "25: \n"
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
+ "vld1.8 {q1}, [%2]! \n"
+ "subs %3, %3, #16 \n"
+ "vrhadd.u8 q0, q1 \n"
+ "vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 25b \n"
+ "b 99f \n"
+
+ // Blend 50 / 50.
+ "50: \n"
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
+ "vld1.8 {q1}, [%2]! \n"
+ "subs %3, %3, #16 \n"
+ "vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 50b \n"
+ "b 99f \n"
+
+ // Blend 75 / 25.
+ "75: \n"
+ MEMACCESS(1)
+ "vld1.8 {q1}, [%1]! \n"
+ MEMACCESS(2)
+ "vld1.8 {q0}, [%2]! \n"
+ "subs %3, %3, #16 \n"
+ "vrhadd.u8 q0, q1 \n"
+ "vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 75b \n"
+ "b 99f \n"
+
+ // Blend 100 / 0 - Copy row unchanged.
+ "100: \n"
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n"
+ "subs %3, %3, #16 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 100b \n"
+
+ "99: \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(src_stride), // %2
+ "+r"(dst_width), // %3
+ "+r"(source_y_fraction) // %4
+ :
+ : "cc", "memory", "q0", "q1", "d4", "d5", "q13", "q14"
+ );
+}
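+
+// A scalar sketch (illustration only): the general-purpose path above blends
+// the two rows with an 8.8 fixed-point fraction; f == 0, 64, 128 and 192
+// take the specialized vrhadd/copy branches:
+#if 0
+static void InterpolateRow_Sketch(uint8* dst, const uint8* s0,
+                                  const uint8* s1, int width, int f) {
+  int i;
+  for (i = 0; i < width; ++i) {
+    dst[i] = (uint8)((s0[i] * (256 - f) + s1[i] * f + 128) >> 8); // rounded
+  }
+}
+#endif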
+
+// dr * (256 - sa) / 256 + sr = dr - dr * sa / 256 + sr
+void ARGBBlendRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "subs %3, #8 \n"
+ "blt 89f \n"
+ // Blend 8 pixels.
+ "8: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of ARGB0.
+ MEMACCESS(1)
+ "vld4.8 {d4, d5, d6, d7}, [%1]! \n" // load 8 pixels of ARGB1.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vmull.u8 q10, d4, d3 \n" // db * a
+ "vmull.u8 q11, d5, d3 \n" // dg * a
+ "vmull.u8 q12, d6, d3 \n" // dr * a
+ "vqrshrn.u16 d20, q10, #8 \n" // db >>= 8
+ "vqrshrn.u16 d21, q11, #8 \n" // dg >>= 8
+ "vqrshrn.u16 d22, q12, #8 \n" // dr >>= 8
+ "vqsub.u8 q2, q2, q10 \n" // dbg - dbg * a / 256
+ "vqsub.u8 d6, d6, d22 \n" // dr - dr * a / 256
+ "vqadd.u8 q0, q0, q2 \n" // + sbg
+ "vqadd.u8 d2, d2, d6 \n" // + sr
+ "vmov.u8 d3, #255 \n" // a = 255
+ MEMACCESS(2)
+ "vst4.8 {d0, d1, d2, d3}, [%2]! \n" // store 8 pixels of ARGB.
+ "bge 8b \n"
+
+ "89: \n"
+ "adds %3, #8-1 \n"
+ "blt 99f \n"
+
+ // Blend 1 pixels.
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0[0],d1[0],d2[0],d3[0]}, [%0]! \n" // load 1 pixel ARGB0.
+ MEMACCESS(1)
+ "vld4.8 {d4[0],d5[0],d6[0],d7[0]}, [%1]! \n" // load 1 pixel ARGB1.
+ "subs %3, %3, #1 \n" // 1 processed per loop.
+ "vmull.u8 q10, d4, d3 \n" // db * a
+ "vmull.u8 q11, d5, d3 \n" // dg * a
+ "vmull.u8 q12, d6, d3 \n" // dr * a
+ "vqrshrn.u16 d20, q10, #8 \n" // db >>= 8
+ "vqrshrn.u16 d21, q11, #8 \n" // dg >>= 8
+ "vqrshrn.u16 d22, q12, #8 \n" // dr >>= 8
+ "vqsub.u8 q2, q2, q10 \n" // dbg - dbg * a / 256
+ "vqsub.u8 d6, d6, d22 \n" // dr - dr * a / 256
+ "vqadd.u8 q0, q0, q2 \n" // + sbg
+ "vqadd.u8 d2, d2, d6 \n" // + sr
+ "vmov.u8 d3, #255 \n" // a = 255
+ MEMACCESS(2)
+ "vst4.8 {d0[0],d1[0],d2[0],d3[0]}, [%2]! \n" // store 1 pixel.
+ "bge 1b \n"
+
+ "99: \n"
+
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q10", "q11", "q12"
+ );
+}
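+
+// A scalar sketch (illustration only) of the per-channel "over" blend the
+// loops above compute, using the identity in the comment before the function:
+#if 0
+static __inline uint8 BlendChannel_Sketch(int s, int d, int sa) {
+  int v = s + d - ((d * sa + 128) >> 8); // s + d * (256 - sa) / 256, rounded
+  return (uint8)(v > 255 ? 255 : v);
+}
+#endif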
+
+// Attenuate 8 pixels at a time.
+void ARGBAttenuateRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) {
+ asm volatile (
+ // Attenuate 8 pixels.
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 pixels of ARGB.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q10, d0, d3 \n" // b * a
+ "vmull.u8 q11, d1, d3 \n" // g * a
+ "vmull.u8 q12, d2, d3 \n" // r * a
+ "vqrshrn.u16 d0, q10, #8 \n" // b >>= 8
+ "vqrshrn.u16 d1, q11, #8 \n" // g >>= 8
+ "vqrshrn.u16 d2, q12, #8 \n" // r >>= 8
+ MEMACCESS(1)
+ "vst4.8 {d0, d1, d2, d3}, [%1]! \n" // store 8 pixels of ARGB.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q10", "q11", "q12"
+ );
+}
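+
+// A scalar sketch (illustration only): attenuation multiplies each color
+// channel by its alpha, with vqrshrn #8 rounding, i.e. ch * a / 256 rounded:
+#if 0
+static __inline uint8 Attenuate_Sketch(int ch, int a) {
+  return (uint8)((ch * a + 128) >> 8);
+}
+#endif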
+
+// Quantize 8 ARGB pixels (32 bytes).
+// dst = (dst * scale >> 16) * interval_size + interval_offset;
+void ARGBQuantizeRow_NEON(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width) {
+ asm volatile (
+ "vdup.u16 q8, %2 \n"
+ "vshr.u16 q8, q8, #1 \n" // scale >>= 1
+ "vdup.u16 q9, %3 \n" // interval multiply.
+ "vdup.u16 q10, %4 \n" // interval add
+
+ // 8 pixel loop.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0] \n" // load 8 pixels of ARGB.
+ "subs %1, %1, #8 \n" // 8 processed per loop.
+ "vmovl.u8 q0, d0 \n" // b (0 .. 255)
+ "vmovl.u8 q1, d2 \n"
+ "vmovl.u8 q2, d4 \n"
+ "vqdmulh.s16 q0, q0, q8 \n" // b * scale
+ "vqdmulh.s16 q1, q1, q8 \n" // g
+ "vqdmulh.s16 q2, q2, q8 \n" // r
+ "vmul.u16 q0, q0, q9 \n" // b * interval_size
+ "vmul.u16 q1, q1, q9 \n" // g
+ "vmul.u16 q2, q2, q9 \n" // r
+ "vadd.u16 q0, q0, q10 \n" // b + interval_offset
+ "vadd.u16 q1, q1, q10 \n" // g
+ "vadd.u16 q2, q2, q10 \n" // r
+ "vqmovn.u16 d0, q0 \n"
+ "vqmovn.u16 d2, q1 \n"
+ "vqmovn.u16 d4, q2 \n"
+ MEMACCESS(0)
+ "vst4.8 {d0, d2, d4, d6}, [%0]! \n" // store 8 pixels of ARGB.
+ "bgt 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(width) // %1
+ : "r"(scale), // %2
+ "r"(interval_size), // %3
+ "r"(interval_offset) // %4
+ : "cc", "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10"
+ );
+}
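+
+// A scalar sketch (illustration only): vqdmulh computes (a * b * 2) >> 16,
+// which is why the NEON code pre-halves scale before the per-channel step:
+#if 0
+static __inline uint8 Quantize_Sketch(int v, int scale, int interval_size,
+                                      int interval_offset) {
+  return (uint8)(((v * scale) >> 16) * interval_size + interval_offset);
+}
+#endif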
+
+// Shade 8 pixels at a time by specified value.
+// NOTE vqrdmulh.s16 q10, q10, d0[0] must use a scalar register in d0-d7.
+// Rounding in vqrdmulh adds +1 to the high half if the high bit of the low
+// s16 of the product is set.
+void ARGBShadeRow_NEON(const uint8* src_argb, uint8* dst_argb, int width,
+ uint32 value) {
+ asm volatile (
+ "vdup.u32 q0, %3 \n" // duplicate scale value.
+ "vzip.u8 d0, d1 \n" // d0 aarrggbb.
+ "vshr.u16 q0, q0, #1 \n" // scale / 2.
+
+ // 8 pixel loop.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d20, d22, d24, d26}, [%0]! \n" // load 8 pixels of ARGB.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmovl.u8 q10, d20 \n" // b (0 .. 255)
+ "vmovl.u8 q11, d22 \n"
+ "vmovl.u8 q12, d24 \n"
+ "vmovl.u8 q13, d26 \n"
+ "vqrdmulh.s16 q10, q10, d0[0] \n" // b * scale * 2
+ "vqrdmulh.s16 q11, q11, d0[1] \n" // g
+ "vqrdmulh.s16 q12, q12, d0[2] \n" // r
+ "vqrdmulh.s16 q13, q13, d0[3] \n" // a
+ "vqmovn.u16 d20, q10 \n"
+ "vqmovn.u16 d22, q11 \n"
+ "vqmovn.u16 d24, q12 \n"
+ "vqmovn.u16 d26, q13 \n"
+ MEMACCESS(1)
+ "vst4.8 {d20, d22, d24, d26}, [%1]! \n" // store 8 pixels of ARGB.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "r"(value) // %3
+ : "cc", "memory", "q0", "q10", "q11", "q12", "q13"
+ );
+}
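+
+// A scalar sketch (illustration only): vqrdmulh returns
+// sat((2 * a * b + 0x8000) >> 16); with the byte-replicated, pre-halved
+// scale this shades each channel by roughly value_ch / 255:
+#if 0
+static __inline uint8 Shade_Sketch(int ch, int value_ch) {
+  // value_ch * 0x0101 replicates the byte into 16 bits (x/255 ~ x*257/65536).
+  return (uint8)((ch * (value_ch * 0x0101) + 0x8000) >> 16);
+}
+#endif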
+
+// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels
+// Similar to ARGBToYJ but stores ARGB.
+// C code is (15 * b + 75 * g + 38 * r + 64) >> 7;
+void ARGBGrayRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) {
+ asm volatile (
+ "vmov.u8 d24, #15 \n" // B * 0.11400 coefficient
+ "vmov.u8 d25, #75 \n" // G * 0.58700 coefficient
+ "vmov.u8 d26, #38 \n" // R * 0.29900 coefficient
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmull.u8 q2, d0, d24 \n" // B
+ "vmlal.u8 q2, d1, d25 \n" // G
+ "vmlal.u8 q2, d2, d26 \n" // R
+ "vqrshrun.s16 d0, q2, #7 \n" // 15 bit to 8 bit B
+ "vmov d1, d0 \n" // G
+ "vmov d2, d0 \n" // R
+ MEMACCESS(1)
+ "vst4.8 {d0, d1, d2, d3}, [%1]! \n" // store 8 ARGB pixels.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q12", "q13"
+ );
+}
+
+// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
+// b = (r * 35 + g * 68 + b * 17) >> 7
+// g = (r * 45 + g * 88 + b * 22) >> 7
+// r = (r * 50 + g * 98 + b * 24) >> 7
+void ARGBSepiaRow_NEON(uint8* dst_argb, int width) {
+ asm volatile (
+ "vmov.u8 d20, #17 \n" // BB coefficient
+ "vmov.u8 d21, #68 \n" // BG coefficient
+ "vmov.u8 d22, #35 \n" // BR coefficient
+ "vmov.u8 d24, #22 \n" // GB coefficient
+ "vmov.u8 d25, #88 \n" // GG coefficient
+ "vmov.u8 d26, #45 \n" // GR coefficient
+ "vmov.u8 d28, #24 \n" // BB coefficient
+ "vmov.u8 d29, #98 \n" // BG coefficient
+ "vmov.u8 d30, #50 \n" // BR coefficient
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0] \n" // load 8 ARGB pixels.
+ "subs %1, %1, #8 \n" // 8 processed per loop.
+ "vmull.u8 q2, d0, d20 \n" // B to Sepia B
+ "vmlal.u8 q2, d1, d21 \n" // G
+ "vmlal.u8 q2, d2, d22 \n" // R
+ "vmull.u8 q3, d0, d24 \n" // B to Sepia G
+ "vmlal.u8 q3, d1, d25 \n" // G
+ "vmlal.u8 q3, d2, d26 \n" // R
+ "vmull.u8 q8, d0, d28 \n" // B to Sepia R
+ "vmlal.u8 q8, d1, d29 \n" // G
+ "vmlal.u8 q8, d2, d30 \n" // R
+ "vqshrn.u16 d0, q2, #7 \n" // 16 bit to 8 bit B
+ "vqshrn.u16 d1, q3, #7 \n" // 16 bit to 8 bit G
+ "vqshrn.u16 d2, q8, #7 \n" // 16 bit to 8 bit R
+ MEMACCESS(0)
+ "vst4.8 {d0, d1, d2, d3}, [%0]! \n" // store 8 ARGB pixels.
+ "bgt 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(width) // %1
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3",
+ "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
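+
+// A scalar sketch (illustration only) of one sepia output channel, per the
+// formulas above (coefficients in 1/128 units, vqshrn saturates to 255):
+#if 0
+static __inline uint8 SepiaChannel_Sketch(int b, int g, int r,
+                                          int cb, int cg, int cr) {
+  int v = (b * cb + g * cg + r * cr) >> 7;
+  return (uint8)(v > 255 ? 255 : v);
+}
+// e.g. new_b = SepiaChannel_Sketch(b, g, r, 17, 68, 35);
+#endif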
+
+// Transform 8 ARGB pixels (32 bytes) with a color matrix.
+// TODO(fbarchard): Same as Sepia except the matrix is caller-provided. This
+// function needs to saturate. Consider doing a non-saturating version.
+void ARGBColorMatrixRow_NEON(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width) {
+ asm volatile (
+ MEMACCESS(3)
+ "vld1.8 {q2}, [%3] \n" // load 3 ARGB vectors.
+ "vmovl.s8 q0, d4 \n" // B,G coefficients s16.
+ "vmovl.s8 q1, d5 \n" // R,A coefficients s16.
+
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d16, d18, d20, d22}, [%0]! \n" // load 8 ARGB pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop.
+ "vmovl.u8 q8, d16 \n" // b (0 .. 255) 16 bit
+ "vmovl.u8 q9, d18 \n" // g
+ "vmovl.u8 q10, d20 \n" // r
+ "vmovl.u8 q11, d22 \n" // a
+ "vmul.s16 q12, q8, d0[0] \n" // B = B * Matrix B
+ "vmul.s16 q13, q8, d1[0] \n" // G = B * Matrix G
+ "vmul.s16 q14, q8, d2[0] \n" // R = B * Matrix R
+ "vmul.s16 q15, q8, d3[0] \n" // A = B * Matrix A
+ "vmul.s16 q4, q9, d0[1] \n" // B += G * Matrix B
+ "vmul.s16 q5, q9, d1[1] \n" // G += G * Matrix G
+ "vmul.s16 q6, q9, d2[1] \n" // R += G * Matrix R
+ "vmul.s16 q7, q9, d3[1] \n" // A += G * Matrix A
+ "vqadd.s16 q12, q12, q4 \n" // Accumulate B
+ "vqadd.s16 q13, q13, q5 \n" // Accumulate G
+ "vqadd.s16 q14, q14, q6 \n" // Accumulate R
+ "vqadd.s16 q15, q15, q7 \n" // Accumulate A
+ "vmul.s16 q4, q10, d0[2] \n" // B += R * Matrix B
+ "vmul.s16 q5, q10, d1[2] \n" // G += R * Matrix G
+ "vmul.s16 q6, q10, d2[2] \n" // R += R * Matrix R
+ "vmul.s16 q7, q10, d3[2] \n" // A += R * Matrix A
+ "vqadd.s16 q12, q12, q4 \n" // Accumulate B
+ "vqadd.s16 q13, q13, q5 \n" // Accumulate G
+ "vqadd.s16 q14, q14, q6 \n" // Accumulate R
+ "vqadd.s16 q15, q15, q7 \n" // Accumulate A
+ "vmul.s16 q4, q11, d0[3] \n" // B += A * Matrix B
+ "vmul.s16 q5, q11, d1[3] \n" // G += A * Matrix G
+ "vmul.s16 q6, q11, d2[3] \n" // R += A * Matrix R
+ "vmul.s16 q7, q11, d3[3] \n" // A += A * Matrix A
+ "vqadd.s16 q12, q12, q4 \n" // Accumulate B
+ "vqadd.s16 q13, q13, q5 \n" // Accumulate G
+ "vqadd.s16 q14, q14, q6 \n" // Accumulate R
+ "vqadd.s16 q15, q15, q7 \n" // Accumulate A
+ "vqshrun.s16 d16, q12, #6 \n" // 16 bit to 8 bit B
+ "vqshrun.s16 d18, q13, #6 \n" // 16 bit to 8 bit G
+ "vqshrun.s16 d20, q14, #6 \n" // 16 bit to 8 bit R
+ "vqshrun.s16 d22, q15, #6 \n" // 16 bit to 8 bit A
+ MEMACCESS(1)
+ "vst4.8 {d16, d18, d20, d22}, [%1]! \n" // store 8 ARGB pixels.
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "r"(matrix_argb) // %3
+ : "cc", "memory", "q0", "q1", "q2", "q4", "q5", "q6", "q7", "q8", "q9",
+ "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
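+
+// A scalar sketch (illustration only): each output channel is a saturating
+// dot product of the input B,G,R,A vector with one 4-coefficient row of
+// matrix_argb, narrowed with >> 6 and clamped to [0, 255]:
+#if 0
+static __inline uint8 ColorMatrixChannel_Sketch(int b, int g, int r, int a,
+                                                const int8* row) {
+  int v = (b * row[0] + g * row[1] + r * row[2] + a * row[3]) >> 6;
+  return (uint8)(v < 0 ? 0 : v > 255 ? 255 : v);
+}
+#endif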
+
+// TODO(fbarchard): fix vqshrun in ARGBMultiplyRow_NEON and reenable.
+#ifdef HAS_ARGBMULTIPLYROW_NEON
+// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
+void ARGBMultiplyRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 8 pixel loop.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(1)
+ "vld4.8 {d1, d3, d5, d7}, [%1]! \n" // load 8 more ARGB pixels.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vmull.u8 q0, d0, d1 \n" // multiply B
+ "vmull.u8 q1, d2, d3 \n" // multiply G
+ "vmull.u8 q2, d4, d5 \n" // multiply R
+ "vmull.u8 q3, d6, d7 \n" // multiply A
+ "vrshrn.u16 d0, q0, #8 \n" // 16 bit to 8 bit B
+ "vrshrn.u16 d1, q1, #8 \n" // 16 bit to 8 bit G
+ "vrshrn.u16 d2, q2, #8 \n" // 16 bit to 8 bit R
+ "vrshrn.u16 d3, q3, #8 \n" // 16 bit to 8 bit A
+ MEMACCESS(2)
+ "vst4.8 {d0, d1, d2, d3}, [%2]! \n" // store 8 ARGB pixels.
+ "bgt 1b \n"
+
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3"
+ );
+}
+#endif // HAS_ARGBMULTIPLYROW_NEON
+
+// Add 2 rows of ARGB pixels together, 8 pixels at a time.
+void ARGBAddRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 8 pixel loop.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(1)
+ "vld4.8 {d4, d5, d6, d7}, [%1]! \n" // load 8 more ARGB pixels.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vqadd.u8 q0, q0, q2 \n" // add B, G
+ "vqadd.u8 q1, q1, q3 \n" // add R, A
+ MEMACCESS(2)
+ "vst4.8 {d0, d1, d2, d3}, [%2]! \n" // store 8 ARGB pixels.
+ "bgt 1b \n"
+
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3"
+ );
+}
+
+// Subtract 2 rows of ARGB pixels, 8 pixels at a time.
+void ARGBSubtractRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 8 pixel loop.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(1)
+ "vld4.8 {d4, d5, d6, d7}, [%1]! \n" // load 8 more ARGB pixels.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vqsub.u8 q0, q0, q2 \n" // subtract B, G
+ "vqsub.u8 q1, q1, q3 \n" // subtract R, A
+ MEMACCESS(2)
+ "vst4.8 {d0, d1, d2, d3}, [%2]! \n" // store 8 ARGB pixels.
+ "bgt 1b \n"
+
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "q0", "q1", "q2", "q3"
+ );
+}
+
+// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
+// A = 255
+// R = Sobel
+// G = Sobel
+// B = Sobel
+void SobelRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "vmov.u8 d3, #255 \n" // alpha
+ // 8 pixel loop.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {d0}, [%0]! \n" // load 8 sobelx.
+ MEMACCESS(1)
+ "vld1.8 {d1}, [%1]! \n" // load 8 sobely.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vqadd.u8 d0, d0, d1 \n" // add
+ "vmov.u8 d1, d0 \n"
+ "vmov.u8 d2, d0 \n"
+ MEMACCESS(2)
+ "vst4.8 {d0, d1, d2, d3}, [%2]! \n" // store 8 ARGB pixels.
+ "bgt 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "q0", "q1"
+ );
+}
+
+// Adds Sobel X and Sobel Y and stores Sobel into plane.
+void SobelToPlaneRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width) {
+ asm volatile (
+ // 16 pixel loop.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load 16 sobelx.
+ MEMACCESS(1)
+ "vld1.8 {q1}, [%1]! \n" // load 16 sobely.
+ "subs %3, %3, #16 \n" // 16 processed per loop.
+ "vqadd.u8 q0, q0, q1 \n" // add
+ MEMACCESS(2)
+ "vst1.8 {q0}, [%2]! \n" // store 16 pixels.
+ "bgt 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_y), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "q0", "q1"
+ );
+}
+
+// Mixes Sobel X, Sobel Y and Sobel into ARGB.
+// A = 255
+// R = Sobel X
+// G = Sobel
+// B = Sobel Y
+void SobelXYRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "vmov.u8 d3, #255 \n" // alpha
+ // 8 pixel loop.
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {d2}, [%0]! \n" // load 8 sobelx.
+ MEMACCESS(1)
+ "vld1.8 {d0}, [%1]! \n" // load 8 sobely.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vqadd.u8 d1, d0, d2 \n" // add
+ MEMACCESS(2)
+ "vst4.8 {d0, d1, d2, d3}, [%2]! \n" // store 8 ARGB pixels.
+ "bgt 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "q0", "q1"
+ );
+}
+
+// SobelX as a matrix is
+// -1 0 1
+// -2 0 2
+// -1 0 1
+void SobelXRow_NEON(const uint8* src_y0, const uint8* src_y1,
+ const uint8* src_y2, uint8* dst_sobelx, int width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {d0}, [%0],%5 \n" // top
+ MEMACCESS(0)
+ "vld1.8 {d1}, [%0],%6 \n"
+ "vsubl.u8 q0, d0, d1 \n"
+ MEMACCESS(1)
+ "vld1.8 {d2}, [%1],%5 \n" // center * 2
+ MEMACCESS(1)
+ "vld1.8 {d3}, [%1],%6 \n"
+ "vsubl.u8 q1, d2, d3 \n"
+ "vadd.s16 q0, q0, q1 \n"
+ "vadd.s16 q0, q0, q1 \n"
+ MEMACCESS(2)
+ "vld1.8 {d2}, [%2],%5 \n" // bottom
+ MEMACCESS(2)
+ "vld1.8 {d3}, [%2],%6 \n"
+ "subs %4, %4, #8 \n" // 8 pixels
+ "vsubl.u8 q1, d2, d3 \n"
+ "vadd.s16 q0, q0, q1 \n"
+ "vabs.s16 q0, q0 \n"
+ "vqmovn.u16 d0, q0 \n"
+ MEMACCESS(3)
+ "vst1.8 {d0}, [%3]! \n" // store 8 sobelx
+ "bgt 1b \n"
+ : "+r"(src_y0), // %0
+ "+r"(src_y1), // %1
+ "+r"(src_y2), // %2
+ "+r"(dst_sobelx), // %3
+ "+r"(width) // %4
+ : "r"(2), // %5
+ "r"(6) // %6
+ : "cc", "memory", "q0", "q1" // Clobber List
+ );
+}
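+
+// A scalar sketch (illustration only) of the loop above; the %5/%6
+// post-increments walk columns x and x + 2 of the three input rows:
+#if 0
+static void SobelXRow_Sketch(const uint8* y0, const uint8* y1,
+                             const uint8* y2, uint8* dst, int width) {
+  int i;
+  for (i = 0; i < width; ++i) {
+    int s = (y0[i] - y0[i + 2]) + 2 * (y1[i] - y1[i + 2]) +
+            (y2[i] - y2[i + 2]);
+    if (s < 0) s = -s;
+    dst[i] = (uint8)(s > 255 ? 255 : s); // vabs + vqmovn
+  }
+}
+#endif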
+
+// SobelY as a matrix is
+// -1 -2 -1
+// 0 0 0
+// 1 2 1
+void SobelYRow_NEON(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {d0}, [%0],%4 \n" // left
+ MEMACCESS(1)
+ "vld1.8 {d1}, [%1],%4 \n"
+ "vsubl.u8 q0, d0, d1 \n"
+ MEMACCESS(0)
+ "vld1.8 {d2}, [%0],%4 \n" // center * 2
+ MEMACCESS(1)
+ "vld1.8 {d3}, [%1],%4 \n"
+ "vsubl.u8 q1, d2, d3 \n"
+ "vadd.s16 q0, q0, q1 \n"
+ "vadd.s16 q0, q0, q1 \n"
+ MEMACCESS(0)
+ "vld1.8 {d2}, [%0],%5 \n" // right
+ MEMACCESS(1)
+ "vld1.8 {d3}, [%1],%5 \n"
+ "subs %3, %3, #8 \n" // 8 pixels
+ "vsubl.u8 q1, d2, d3 \n"
+ "vadd.s16 q0, q0, q1 \n"
+ "vabs.s16 q0, q0 \n"
+ "vqmovn.u16 d0, q0 \n"
+ MEMACCESS(2)
+ "vst1.8 {d0}, [%2]! \n" // store 8 sobely
+ "bgt 1b \n"
+ : "+r"(src_y0), // %0
+ "+r"(src_y1), // %1
+ "+r"(dst_sobely), // %2
+ "+r"(width) // %3
+ : "r"(1), // %4
+ "r"(6) // %5
+ : "cc", "memory", "q0", "q1" // Clobber List
+ );
+}
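+
+// A scalar sketch (illustration only); only two rows are read because the
+// middle row of the SobelY kernel is all zeros:
+#if 0
+static void SobelYRow_Sketch(const uint8* y0, const uint8* y1,
+                             uint8* dst, int width) {
+  int i;
+  for (i = 0; i < width; ++i) {
+    int s = (y0[i] - y1[i]) + 2 * (y0[i + 1] - y1[i + 1]) +
+            (y0[i + 2] - y1[i + 2]);
+    if (s < 0) s = -s;
+    dst[i] = (uint8)(s > 255 ? 255 : s);
+  }
+}
+#endif
+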
+#endif // defined(__ARM_NEON__) && !defined(__aarch64__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/row_neon64.cc b/media/libaom/src/third_party/libyuv/source/row_neon64.cc
new file mode 100644
index 000000000..5d015454b
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/row_neon64.cc
@@ -0,0 +1,3087 @@
+/*
+ * Copyright 2014 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC Neon on armv8 (64 bit).
+#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
+
+// Read 8 Y, 4 U and 4 V from 422
+#define READYUV422 \
+ MEMACCESS(0) \
+ "ld1 {v0.8b}, [%0], #8 \n" \
+ MEMACCESS(1) \
+ "ld1 {v1.s}[0], [%1], #4 \n" \
+ MEMACCESS(2) \
+ "ld1 {v1.s}[1], [%2], #4 \n"
+
+// Read 8 Y, 2 U and 2 V from 411
+#define READYUV411 \
+ MEMACCESS(0) \
+ "ld1 {v0.8b}, [%0], #8 \n" \
+ MEMACCESS(1) \
+ "ld1 {v2.h}[0], [%1], #2 \n" \
+ MEMACCESS(2) \
+ "ld1 {v2.h}[1], [%2], #2 \n" \
+ "zip1 v1.8b, v2.8b, v2.8b \n"
+
+// Read 8 Y, 8 U and 8 V from 444
+#define READYUV444 \
+ MEMACCESS(0) \
+ "ld1 {v0.8b}, [%0], #8 \n" \
+ MEMACCESS(1) \
+ "ld1 {v1.d}[0], [%1], #8 \n" \
+ MEMACCESS(2) \
+ "ld1 {v1.d}[1], [%2], #8 \n" \
+ "uaddlp v1.8h, v1.16b \n" \
+ "rshrn v1.8b, v1.8h, #1 \n"
+
+// Read 8 Y, and set 4 U and 4 V to 128
+#define READYUV400 \
+ MEMACCESS(0) \
+ "ld1 {v0.8b}, [%0], #8 \n" \
+ "movi v1.8b , #128 \n"
+
+// Read 8 Y and 4 UV from NV12
+#define READNV12 \
+ MEMACCESS(0) \
+ "ld1 {v0.8b}, [%0], #8 \n" \
+ MEMACCESS(1) \
+ "ld1 {v2.8b}, [%1], #8 \n" \
+ "uzp1 v1.8b, v2.8b, v2.8b \n" \
+ "uzp2 v3.8b, v2.8b, v2.8b \n" \
+ "ins v1.s[1], v3.s[0] \n"
+
+// Read 8 Y and 4 VU from NV21
+#define READNV21 \
+ MEMACCESS(0) \
+ "ld1 {v0.8b}, [%0], #8 \n" \
+ MEMACCESS(1) \
+ "ld1 {v2.8b}, [%1], #8 \n" \
+ "uzp1 v3.8b, v2.8b, v2.8b \n" \
+ "uzp2 v1.8b, v2.8b, v2.8b \n" \
+ "ins v1.s[1], v3.s[0] \n"
+
+// Read 8 YUY2
+#define READYUY2 \
+ MEMACCESS(0) \
+ "ld2 {v0.8b, v1.8b}, [%0], #16 \n" \
+ "uzp2 v3.8b, v1.8b, v1.8b \n" \
+ "uzp1 v1.8b, v1.8b, v1.8b \n" \
+ "ins v1.s[1], v3.s[0] \n"
+
+// Read 8 UYVY
+#define READUYVY \
+ MEMACCESS(0) \
+ "ld2 {v2.8b, v3.8b}, [%0], #16 \n" \
+ "orr v0.8b, v3.8b, v3.8b \n" \
+ "uzp1 v1.8b, v2.8b, v2.8b \n" \
+ "uzp2 v3.8b, v2.8b, v2.8b \n" \
+ "ins v1.s[1], v3.s[0] \n"
+
+#define YUV422TORGB_SETUP_REG \
+ "ld1r {v24.8h}, [%[kUVBiasBGR]], #2 \n" \
+ "ld1r {v25.8h}, [%[kUVBiasBGR]], #2 \n" \
+ "ld1r {v26.8h}, [%[kUVBiasBGR]] \n" \
+ "ld1r {v31.4s}, [%[kYToRgb]] \n" \
+ "movi v27.8h, #128 \n" \
+ "movi v28.8h, #102 \n" \
+ "movi v29.8h, #25 \n" \
+ "movi v30.8h, #52 \n"
+
+#define YUV422TORGB(vR, vG, vB) \
+ "uxtl v0.8h, v0.8b \n" /* Extract Y */ \
+ "shll v2.8h, v1.8b, #8 \n" /* Replicate UV */ \
+ "ushll2 v3.4s, v0.8h, #0 \n" /* Y */ \
+ "ushll v0.4s, v0.4h, #0 \n" \
+ "mul v3.4s, v3.4s, v31.4s \n" \
+ "mul v0.4s, v0.4s, v31.4s \n" \
+ "sqshrun v0.4h, v0.4s, #16 \n" \
+ "sqshrun2 v0.8h, v3.4s, #16 \n" /* Y */ \
+ "uaddw v1.8h, v2.8h, v1.8b \n" /* Replicate UV */ \
+ "mov v2.d[0], v1.d[1] \n" /* Extract V */ \
+ "uxtl v2.8h, v2.8b \n" \
+ "uxtl v1.8h, v1.8b \n" /* Extract U */ \
+ "mul v3.8h, v1.8h, v27.8h \n" \
+ "mul v5.8h, v1.8h, v29.8h \n" \
+ "mul v6.8h, v2.8h, v30.8h \n" \
+ "mul v7.8h, v2.8h, v28.8h \n" \
+ "sqadd v6.8h, v6.8h, v5.8h \n" \
+ "sqadd " #vB ".8h, v24.8h, v0.8h \n" /* B */ \
+ "sqadd " #vG ".8h, v25.8h, v0.8h \n" /* G */ \
+ "sqadd " #vR ".8h, v26.8h, v0.8h \n" /* R */ \
+ "sqadd " #vB ".8h, " #vB ".8h, v3.8h \n" /* B */ \
+ "sqsub " #vG ".8h, " #vG ".8h, v6.8h \n" /* G */ \
+ "sqadd " #vR ".8h, " #vR ".8h, v7.8h \n" /* R */ \
+ "sqshrun " #vB ".8b, " #vB ".8h, #6 \n" /* B */ \
+ "sqshrun " #vG ".8b, " #vG ".8h, #6 \n" /* G */ \
+ "sqshrun " #vR ".8b, " #vR ".8h, #6 \n" /* R */ \
+
+// YUV to RGB conversion constants.
+// Y contribution to R,G,B. Scale and bias.
+#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */
+#define YGB 1160 /* 1.164 * 64 * 16 - adjusted for even error distribution */
+
+// U and V contributions to R,G,B.
+#define UB -128 /* -min(128, round(2.018 * 64)) */
+#define UG 25 /* -round(-0.391 * 64) */
+#define VG 52 /* -round(-0.813 * 64) */
+#define VR -102 /* -round(1.596 * 64) */
+
+// Bias values to subtract 16 from Y and 128 from U and V.
+#define BB (UB * 128 - YGB)
+#define BG (UG * 128 + VG * 128 - YGB)
+#define BR (VR * 128 - YGB)
+
+static vec16 kUVBiasBGR = { BB, BG, BR, 0, 0, 0, 0, 0 };
+static vec32 kYToRgb = { 0x0101 * YG, 0, 0, 0 };
+
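+// A scalar sketch (illustration only, up to rounding/saturation details) of
+// what YUV422TORGB computes per pixel with the constants above:
+#if 0
+static __inline void YuvPixel_Sketch(uint8 y, uint8 u, uint8 v,
+                                     uint8* b, uint8* g, uint8* r) {
+  int y1 = (int)(((uint32)y * 0x0101 * YG) >> 16);
+  int bi = (y1 + 128 * u + BB) >> 6; /* 128 == -UB */
+  int gi = (y1 - UG * u - VG * v + BG) >> 6;
+  int ri = (y1 + 102 * v + BR) >> 6; /* 102 == -VR */
+  *b = (uint8)(bi < 0 ? 0 : bi > 255 ? 255 : bi);
+  *g = (uint8)(gi < 0 ? 0 : gi > 255 ? 255 : gi);
+  *r = (uint8)(ri < 0 ? 0 : ri > 255 ? 255 : ri);
+}
+#endif
+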
+#undef YG
+#undef YGB
+#undef UB
+#undef UG
+#undef VG
+#undef VR
+#undef BB
+#undef BG
+#undef BR
+
+#define RGBTOUV_SETUP_REG \
+ "movi v20.8h, #56, lsl #0 \n" /* UB/VR coefficient (0.875) / 2 */ \
+ "movi v21.8h, #37, lsl #0 \n" /* UG coefficient (-0.5781) / 2 */ \
+ "movi v22.8h, #19, lsl #0 \n" /* UR coefficient (-0.2969) / 2 */ \
+ "movi v23.8h, #9, lsl #0 \n" /* VB coefficient (-0.1406) / 2 */ \
+ "movi v24.8h, #47, lsl #0 \n" /* VG coefficient (-0.7344) / 2 */ \
+ "movi v25.16b, #0x80 \n" /* 128.5 (0x8080 in 16-bit) */
+
+
+#ifdef HAS_I444TOARGBROW_NEON
+void I444ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV444
+ YUV422TORGB(v22, v21, v20)
+ "subs %w4, %w4, #8 \n"
+ "movi v23.8b, #255 \n" /* A */
+ MEMACCESS(3)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I444TOARGBROW_NEON
+
+#ifdef HAS_I422TOARGBROW_NEON
+void I422ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v22, v21, v20)
+ "subs %w4, %w4, #8 \n"
+ "movi v23.8b, #255 \n" /* A */
+ MEMACCESS(3)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TOARGBROW_NEON
+
+#ifdef HAS_I411TOARGBROW_NEON
+void I411ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV411
+ YUV422TORGB(v22, v21, v20)
+ "subs %w4, %w4, #8 \n"
+ "movi v23.8b, #255 \n" /* A */
+ MEMACCESS(3)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I411TOARGBROW_NEON
+
+#ifdef HAS_I422TOBGRAROW_NEON
+void I422ToBGRARow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_bgra,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v21, v22, v23)
+ "subs %w4, %w4, #8 \n"
+ "movi v20.8b, #255 \n" /* A */
+ MEMACCESS(3)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_bgra), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TOBGRAROW_NEON
+
+#ifdef HAS_I422TOABGRROW_NEON
+void I422ToABGRRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_abgr,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v20, v21, v22)
+ "subs %w4, %w4, #8 \n"
+ "movi v23.8b, #255 \n" /* A */
+ MEMACCESS(3)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_abgr), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TOABGRROW_NEON
+
+#ifdef HAS_I422TORGBAROW_NEON
+void I422ToRGBARow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgba,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v23, v22, v21)
+ "subs %w4, %w4, #8 \n"
+ "movi v20.8b, #255 \n" /* A */
+ MEMACCESS(3)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%3], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_rgba), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TORGBAROW_NEON
+
+#ifdef HAS_I422TORGB24ROW_NEON
+void I422ToRGB24Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb24,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v22, v21, v20)
+ "subs %w4, %w4, #8 \n"
+ MEMACCESS(3)
+ "st3 {v20.8b,v21.8b,v22.8b}, [%3], #24 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_rgb24), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TORGB24ROW_NEON
+
+#ifdef HAS_I422TORAWROW_NEON
+void I422ToRAWRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_raw,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v20, v21, v22)
+ "subs %w4, %w4, #8 \n"
+ MEMACCESS(3)
+ "st3 {v20.8b,v21.8b,v22.8b}, [%3], #24 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_raw), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TORAWROW_NEON
+
+#define ARGBTORGB565 \
+ "shll v0.8h, v22.8b, #8 \n" /* R */ \
+ "shll v20.8h, v20.8b, #8 \n" /* B */ \
+ "shll v21.8h, v21.8b, #8 \n" /* G */ \
+ "sri v0.8h, v21.8h, #5 \n" /* RG */ \
+ "sri v0.8h, v20.8h, #11 \n" /* RGB */
+
+#ifdef HAS_I422TORGB565ROW_NEON
+void I422ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_rgb565,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v22, v21, v20)
+ "subs %w4, %w4, #8 \n"
+ ARGBTORGB565
+ MEMACCESS(3)
+ "st1 {v0.8h}, [%3], #16 \n" // store 8 pixels RGB565.
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_rgb565), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TORGB565ROW_NEON
+
+#define ARGBTOARGB1555 \
+ "shll v0.8h, v23.8b, #8 \n" /* A */ \
+ "shll v22.8h, v22.8b, #8 \n" /* R */ \
+ "shll v20.8h, v20.8b, #8 \n" /* B */ \
+ "shll v21.8h, v21.8b, #8 \n" /* G */ \
+ "sri v0.8h, v22.8h, #1 \n" /* AR */ \
+ "sri v0.8h, v21.8h, #6 \n" /* ARG */ \
+ "sri v0.8h, v20.8h, #11 \n" /* ARGB */
+
+#ifdef HAS_I422TOARGB1555ROW_NEON
+void I422ToARGB1555Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb1555,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v22, v21, v20)
+ "subs %w4, %w4, #8 \n"
+ "movi v23.8b, #255 \n"
+ ARGBTOARGB1555
+ MEMACCESS(3)
+ "st1 {v0.8h}, [%3], #16 \n" // store 8 pixels RGB565.
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb1555), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TOARGB1555ROW_NEON
+
+#define ARGBTOARGB4444 \
+ /* Input v20.8b<=B, v21.8b<=G, v22.8b<=R, v23.8b<=A, v4.8b<=0x0f */ \
+ "ushr v20.8b, v20.8b, #4 \n" /* B */ \
+ "bic v21.8b, v21.8b, v4.8b \n" /* G */ \
+ "ushr v22.8b, v22.8b, #4 \n" /* R */ \
+ "bic v23.8b, v23.8b, v4.8b \n" /* A */ \
+ "orr v0.8b, v20.8b, v21.8b \n" /* BG */ \
+ "orr v1.8b, v22.8b, v23.8b \n" /* RA */ \
+ "zip1 v0.16b, v0.16b, v1.16b \n" /* BGRA */
+
+#ifdef HAS_I422TOARGB4444ROW_NEON
+void I422ToARGB4444Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb4444,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "movi v4.16b, #0x0f \n" // bits to clear with vbic.
+ "1: \n"
+ READYUV422
+ YUV422TORGB(v22, v21, v20)
+ "subs %w4, %w4, #8 \n"
+ "movi v23.8b, #255 \n"
+ ARGBTOARGB4444
+ MEMACCESS(3)
+ "st1 {v0.8h}, [%3], #16 \n" // store 8 pixels ARGB4444.
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_argb4444), // %3
+ "+r"(width) // %4
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I422TOARGB4444ROW_NEON
+
+#ifdef HAS_I400TOARGBROW_NEON
+void I400ToARGBRow_NEON(const uint8* src_y,
+ uint8* dst_argb,
+ int width) {
+ int64 width64 = (int64)(width);
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUV400
+ YUV422TORGB(v22, v21, v20)
+ "subs %w2, %w2, #8 \n"
+ "movi v23.8b, #255 \n"
+ MEMACCESS(1)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%1], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width64) // %2
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_I400TOARGBROW_NEON
+
+#ifdef HAS_J400TOARGBROW_NEON
+void J400ToARGBRow_NEON(const uint8* src_y,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ "movi v23.8b, #255 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v20.8b}, [%0], #8 \n"
+ "orr v21.8b, v20.8b, v20.8b \n"
+ "orr v22.8b, v20.8b, v20.8b \n"
+ "subs %w2, %w2, #8 \n"
+ MEMACCESS(1)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%1], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ :
+ : "cc", "memory", "v20", "v21", "v22", "v23"
+ );
+}
+#endif // HAS_J400TOARGBROW_NEON
+
+#ifdef HAS_NV12TOARGBROW_NEON
+void NV12ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READNV12
+ YUV422TORGB(v22, v21, v20)
+ "subs %w3, %w3, #8 \n"
+ "movi v23.8b, #255 \n"
+ MEMACCESS(2)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%2], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_uv), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_NV12TOARGBROW_NEON
+
+#ifdef HAS_NV21TOARGBROW_NEON
+void NV21ToARGBRow_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_argb,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READNV21
+ YUV422TORGB(v22, v21, v20)
+ "subs %w3, %w3, #8 \n"
+ "movi v23.8b, #255 \n"
+ MEMACCESS(2)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%2], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_uv), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_NV21TOARGBROW_NEON
+
+#ifdef HAS_NV12TORGB565ROW_NEON
+void NV12ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_rgb565,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READNV12
+ YUV422TORGB(v22, v21, v20)
+ "subs %w3, %w3, #8 \n"
+ ARGBTORGB565
+ MEMACCESS(2)
+ "st1 {v0.8h}, [%2], 16 \n" // store 8 pixels RGB565.
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_uv), // %1
+ "+r"(dst_rgb565), // %2
+ "+r"(width) // %3
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_NV12TORGB565ROW_NEON
+
+#ifdef HAS_NV21TORGB565ROW_NEON
+void NV21ToRGB565Row_NEON(const uint8* src_y,
+ const uint8* src_uv,
+ uint8* dst_rgb565,
+ int width) {
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READNV21
+ YUV422TORGB(v22, v21, v20)
+ "subs %w3, %w3, #8 \n"
+ ARGBTORGB565
+ MEMACCESS(2)
+ "st1 {v0.8h}, [%2], 16 \n" // store 8 pixels RGB565.
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_uv), // %1
+ "+r"(dst_rgb565), // %2
+ "+r"(width) // %3
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_NV21TORGB565ROW_NEON
+
+#ifdef HAS_YUY2TOARGBROW_NEON
+void YUY2ToARGBRow_NEON(const uint8* src_yuy2,
+ uint8* dst_argb,
+ int width) {
+ int64 width64 = (int64)(width);
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READYUY2
+ YUV422TORGB(v22, v21, v20)
+ "subs %w2, %w2, #8 \n"
+ "movi v23.8b, #255 \n"
+ MEMACCESS(1)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%1], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width64) // %2
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_YUY2TOARGBROW_NEON
+
+#ifdef HAS_UYVYTOARGBROW_NEON
+void UYVYToARGBRow_NEON(const uint8* src_uyvy,
+ uint8* dst_argb,
+ int width) {
+ int64 width64 = (int64)(width);
+ asm volatile (
+ YUV422TORGB_SETUP_REG
+ "1: \n"
+ READUYVY
+ YUV422TORGB(v22, v21, v20)
+ "subs %w2, %w2, #8 \n"
+ "movi v23.8b, #255 \n"
+ MEMACCESS(1)
+ "st4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%1], 32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width64) // %2
+ : [kUVBiasBGR]"r"(&kUVBiasBGR),
+ [kYToRgb]"r"(&kYToRgb)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20",
+ "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_UYVYTOARGBROW_NEON
+
+// Reads 16 pairs of UV and writes the U bytes to dst_u and the V bytes to dst_v.
+#ifdef HAS_SPLITUVROW_NEON
+void SplitUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld2 {v0.16b,v1.16b}, [%0], #32 \n" // load 16 pairs of UV
+ "subs %w3, %w3, #16 \n" // 16 processed per loop
+ MEMACCESS(1)
+ "st1 {v0.16b}, [%1], #16 \n" // store U
+ MEMACCESS(2)
+ "st1 {v1.16b}, [%2], #16 \n" // store V
+ "b.gt 1b \n"
+ : "+r"(src_uv), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(width) // %3 // Output registers
+ : // Input registers
+ : "cc", "memory", "v0", "v1" // Clobber List
+ );
+}
+#endif // HAS_SPLITUVROW_NEON
+
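+// What SplitUVRow computes, as a scalar sketch (reference only): the ld2
+// above de-interleaves in hardware; in C it is a simple strided copy.
+static void SplitUVRow_C_Sketch(const uint8* src_uv, uint8* dst_u,
+                                uint8* dst_v, int width) {
+  for (int x = 0; x < width; ++x) {
+    dst_u[x] = src_uv[2 * x + 0];  // even bytes are U
+    dst_v[x] = src_uv[2 * x + 1];  // odd bytes are V
+  }
+}
+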
+// Reads 16 U's and V's and writes out 16 pairs of UV.
+#ifdef HAS_MERGEUVROW_NEON
+void MergeUVRow_NEON(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load U
+ MEMACCESS(1)
+ "ld1 {v1.16b}, [%1], #16 \n" // load V
+ "subs %w3, %w3, #16 \n" // 16 processed per loop
+ MEMACCESS(2)
+ "st2 {v0.16b,v1.16b}, [%2], #32 \n" // store 16 pairs of UV
+ "b.gt 1b \n"
+ :
+ "+r"(src_u), // %0
+ "+r"(src_v), // %1
+ "+r"(dst_uv), // %2
+ "+r"(width) // %3 // Output registers
+ : // Input registers
+ : "cc", "memory", "v0", "v1" // Clobber List
+ );
+}
+#endif // HAS_MERGEUVROW_NEON
+
+// Copies 'count' bytes, a multiple of 32; the 4-register ld1/st1 allows unaligned access and is fastest on A15-class cores.
+#ifdef HAS_COPYROW_NEON
+void CopyRow_NEON(const uint8* src, uint8* dst, int count) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 32
+ "subs %w2, %w2, #32 \n" // 32 processed per loop
+ MEMACCESS(1)
+ "st1 {v0.8b,v1.8b,v2.8b,v3.8b}, [%1], #32 \n" // store 32
+ "b.gt 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(count) // %2 // Output registers
+ : // Input registers
+ : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+#endif // HAS_COPYROW_NEON
+
+// SetRow writes 'count' bytes using an 8-bit value repeated.
+void SetRow_NEON(uint8* dst, uint8 v8, int count) {
+ asm volatile (
+ "dup v0.16b, %w2 \n" // duplicate 16 bytes
+ "1: \n"
+ "subs %w1, %w1, #16 \n" // 16 bytes per loop
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n" // store
+ "b.gt 1b \n"
+ : "+r"(dst), // %0
+ "+r"(count) // %1
+ : "r"(v8) // %2
+ : "cc", "memory", "v0"
+ );
+}
+
+void ARGBSetRow_NEON(uint8* dst, uint32 v32, int count) {
+ asm volatile (
+ "dup v0.4s, %w2 \n" // duplicate 4 ints
+ "1: \n"
+ "subs %w1, %w1, #4 \n" // 4 ints per loop
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n" // store
+ "b.gt 1b \n"
+ : "+r"(dst), // %0
+ "+r"(count) // %1
+ : "r"(v32) // %2
+ : "cc", "memory", "v0"
+ );
+}
+
+#ifdef HAS_MIRRORROW_NEON
+void MirrorRow_NEON(const uint8* src, uint8* dst, int width) {
+ int64 width64 = (int64) width;
+ asm volatile (
+ // Start at end of source row.
+ "add %0, %0, %2 \n"
+ "sub %0, %0, #16 \n"
+
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], %3 \n" // src -= 16
+ "subs %2, %2, #16 \n" // 16 pixels per loop.
+ "rev64 v0.16b, v0.16b \n"
+ MEMACCESS(1)
+ "st1 {v0.D}[1], [%1], #8 \n" // dst += 16
+ MEMACCESS(1)
+ "st1 {v0.D}[0], [%1], #8 \n"
+ "b.gt 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width64) // %2
+ : "r"((ptrdiff_t)-16) // %3
+ : "cc", "memory", "v0"
+ );
+}
+#endif // HAS_MIRRORROW_NEON
+
+#ifdef HAS_MIRRORUVROW_NEON
+void MirrorUVRow_NEON(const uint8* src_uv, uint8* dst_u, uint8* dst_v,
+ int width) {
+ int64 width64 = (int64) width;
+ asm volatile (
+ // Start at end of source row.
+ "add %0, %0, %3, lsl #1 \n"
+ "sub %0, %0, #16 \n"
+
+ "1: \n"
+ MEMACCESS(0)
+ "ld2 {v0.8b, v1.8b}, [%0], %4 \n" // src -= 16
+ "subs %3, %3, #8 \n" // 8 pixels per loop.
+ "rev64 v0.8b, v0.8b \n"
+ "rev64 v1.8b, v1.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // dst += 8
+ MEMACCESS(2)
+ "st1 {v1.8b}, [%2], #8 \n"
+ "b.gt 1b \n"
+ : "+r"(src_uv), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(width64) // %3
+ : "r"((ptrdiff_t)-16) // %4
+ : "cc", "memory", "v0", "v1"
+ );
+}
+#endif // HAS_MIRRORUVROW_NEON
+
+#ifdef HAS_ARGBMIRRORROW_NEON
+void ARGBMirrorRow_NEON(const uint8* src, uint8* dst, int width) {
+ int64 width64 = (int64) width;
+ asm volatile (
+ // Start at end of source row.
+ "add %0, %0, %2, lsl #2 \n"
+ "sub %0, %0, #16 \n"
+
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], %3 \n" // src -= 16
+ "subs %2, %2, #4 \n" // 4 pixels per loop.
+ "rev64 v0.4s, v0.4s \n"
+ MEMACCESS(1)
+ "st1 {v0.D}[1], [%1], #8 \n" // dst += 16
+ MEMACCESS(1)
+ "st1 {v0.D}[0], [%1], #8 \n"
+ "b.gt 1b \n"
+ : "+r"(src), // %0
+ "+r"(dst), // %1
+ "+r"(width64) // %2
+ : "r"((ptrdiff_t)-16) // %3
+ : "cc", "memory", "v0"
+ );
+}
+#endif // HAS_ARGBMIRRORROW_NEON
+
+#ifdef HAS_RGB24TOARGBROW_NEON
+void RGB24ToARGBRow_NEON(const uint8* src_rgb24, uint8* dst_argb, int pix) {
+ asm volatile (
+ "movi v4.8b, #255 \n" // Alpha
+ "1: \n"
+ MEMACCESS(0)
+ "ld3 {v1.8b,v2.8b,v3.8b}, [%0], #24 \n" // load 8 pixels of RGB24.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ MEMACCESS(1)
+ "st4 {v1.8b,v2.8b,v3.8b,v4.8b}, [%1], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(src_rgb24), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v1", "v2", "v3", "v4" // Clobber List
+ );
+}
+#endif // HAS_RGB24TOARGBROW_NEON
+
+#ifdef HAS_RAWTOARGBROW_NEON
+void RAWToARGBRow_NEON(const uint8* src_raw, uint8* dst_argb, int pix) {
+ asm volatile (
+ "movi v5.8b, #255 \n" // Alpha
+ "1: \n"
+ MEMACCESS(0)
+ "ld3 {v0.8b,v1.8b,v2.8b}, [%0], #24 \n" // read r g b
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "orr v3.8b, v1.8b, v1.8b \n" // move g
+ "orr v4.8b, v0.8b, v0.8b \n" // move r
+ MEMACCESS(1)
+ "st4 {v2.8b,v3.8b,v4.8b,v5.8b}, [%1], #32 \n" // store b g r a
+ "b.gt 1b \n"
+ : "+r"(src_raw), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5" // Clobber List
+ );
+}
+#endif // HAS_RAWTOARGBROW_NEON
+
+#define RGB565TOARGB \
+ "shrn v6.8b, v0.8h, #5 \n" /* G xxGGGGGG */ \
+ "shl v6.8b, v6.8b, #2 \n" /* G GGGGGG00 upper 6 */ \
+ "ushr v4.8b, v6.8b, #6 \n" /* G 000000GG lower 2 */ \
+ "orr v1.8b, v4.8b, v6.8b \n" /* G */ \
+ "xtn v2.8b, v0.8h \n" /* B xxxBBBBB */ \
+ "ushr v0.8h, v0.8h, #11 \n" /* R 000RRRRR */ \
+ "xtn2 v2.16b,v0.8h \n" /* R in upper part */ \
+ "shl v2.16b, v2.16b, #3 \n" /* R,B BBBBB000 upper 5 */ \
+ "ushr v0.16b, v2.16b, #5 \n" /* R,B 00000BBB lower 3 */ \
+ "orr v0.16b, v0.16b, v2.16b \n" /* R,B */ \
+ "dup v2.2D, v0.D[1] \n" /* R */
+
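+// Scalar view of RGB565TOARGB (illustrative sketch): each field is widened
+// by replicating its top bits into the vacated low bits, so 0x1f -> 0xff
+// and 0 -> 0, which is what the shl/ushr/orr pairs above do in parallel.
+static inline uint8 Expand5_Sketch(uint8 v) {  // 5-bit B or R field
+  return (uint8)((v << 3) | (v >> 2));
+}
+static inline uint8 Expand6_Sketch(uint8 v) {  // 6-bit G field
+  return (uint8)((v << 2) | (v >> 4));
+}
+// For a 16-bit pixel p: B = Expand5_Sketch(p & 0x1f),
+// G = Expand6_Sketch((p >> 5) & 0x3f), R = Expand5_Sketch(p >> 11).
+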
+#ifdef HAS_RGB565TOARGBROW_NEON
+void RGB565ToARGBRow_NEON(const uint8* src_rgb565, uint8* dst_argb, int pix) {
+ asm volatile (
+ "movi v3.8b, #255 \n" // Alpha
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 RGB565 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ RGB565TOARGB
+ MEMACCESS(1)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%1], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(src_rgb565), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v6" // Clobber List
+ );
+}
+#endif // HAS_RGB565TOARGBROW_NEON
+
+#define ARGB1555TOARGB \
+ "ushr v2.8h, v0.8h, #10 \n" /* R xxxRRRRR */ \
+ "shl v2.8h, v2.8h, #3 \n" /* R RRRRR000 upper 5 */ \
+ "xtn v3.8b, v2.8h \n" /* RRRRR000 AAAAAAAA */ \
+ \
+ "sshr v2.8h, v0.8h, #15 \n" /* A AAAAAAAA */ \
+ "xtn2 v3.16b, v2.8h \n" \
+ \
+ "xtn v2.8b, v0.8h \n" /* B xxxBBBBB */ \
+ "shrn2 v2.16b,v0.8h, #5 \n" /* G xxxGGGGG */ \
+ \
+ "ushr v1.16b, v3.16b, #5 \n" /* R,A 00000RRR lower 3 */ \
+ "shl v0.16b, v2.16b, #3 \n" /* B,G BBBBB000 upper 5 */ \
+ "ushr v2.16b, v0.16b, #5 \n" /* B,G 00000BBB lower 3 */ \
+ \
+ "orr v0.16b, v0.16b, v2.16b \n" /* B,G */ \
+ "orr v2.16b, v1.16b, v3.16b \n" /* R,A */ \
+ "dup v1.2D, v0.D[1] \n" \
+ "dup v3.2D, v2.D[1] \n"
+
+// RGB555TOARGB is the same as ARGB1555TOARGB but ignores alpha.
+#define RGB555TOARGB \
+ "ushr v2.8h, v0.8h, #10 \n" /* R xxxRRRRR */ \
+ "shl v2.8h, v2.8h, #3 \n" /* R RRRRR000 upper 5 */ \
+ "xtn v3.8b, v2.8h \n" /* RRRRR000 */ \
+ \
+ "xtn v2.8b, v0.8h \n" /* B xxxBBBBB */ \
+ "shrn2 v2.16b,v0.8h, #5 \n" /* G xxxGGGGG */ \
+ \
+ "ushr v1.16b, v3.16b, #5 \n" /* R 00000RRR lower 3 */ \
+ "shl v0.16b, v2.16b, #3 \n" /* B,G BBBBB000 upper 5 */ \
+ "ushr v2.16b, v0.16b, #5 \n" /* B,G 00000BBB lower 3 */ \
+ \
+ "orr v0.16b, v0.16b, v2.16b \n" /* B,G */ \
+ "orr v2.16b, v1.16b, v3.16b \n" /* R */ \
+ "dup v1.2D, v0.D[1] \n" /* G */ \
+
+#ifdef HAS_ARGB1555TOARGBROW_NEON
+void ARGB1555ToARGBRow_NEON(const uint8* src_argb1555, uint8* dst_argb,
+ int pix) {
+ asm volatile (
+ "movi v3.8b, #255 \n" // Alpha
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 ARGB1555 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ ARGB1555TOARGB
+ MEMACCESS(1)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%1], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(src_argb1555), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+#endif // HAS_ARGB1555TOARGBROW_NEON
+
+#define ARGB4444TOARGB \
+ "shrn v1.8b, v0.8h, #8 \n" /* v1(l) AR */ \
+ "xtn2 v1.16b, v0.8h \n" /* v1(h) GB */ \
+ "shl v2.16b, v1.16b, #4 \n" /* B,R BBBB0000 */ \
+ "ushr v3.16b, v1.16b, #4 \n" /* G,A 0000GGGG */ \
+ "ushr v0.16b, v2.16b, #4 \n" /* B,R 0000BBBB */ \
+ "shl v1.16b, v3.16b, #4 \n" /* G,A GGGG0000 */ \
+ "orr v2.16b, v0.16b, v2.16b \n" /* B,R BBBBBBBB */ \
+ "orr v3.16b, v1.16b, v3.16b \n" /* G,A GGGGGGGG */ \
+ "dup v0.2D, v2.D[1] \n" \
+ "dup v1.2D, v3.D[1] \n"
+
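+// Scalar view of ARGB4444TOARGB (illustrative sketch): each 4-bit nibble n
+// widens to (n << 4) | n, i.e. 0xf -> 0xff, which is exactly the
+// shl #4 / ushr #4 / orr sequence above applied to all four channels at once.
+static inline uint8 Expand4_Sketch(uint8 n) {  // n in [0, 15]
+  return (uint8)((n << 4) | n);
+}
+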
+#ifdef HAS_ARGB4444TOARGBROW_NEON
+void ARGB4444ToARGBRow_NEON(const uint8* src_argb4444, uint8* dst_argb,
+ int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 ARGB4444 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ ARGB4444TOARGB
+ MEMACCESS(1)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%1], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(src_argb4444), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4" // Clobber List
+ );
+}
+#endif // HAS_ARGB4444TOARGBROW_NEON
+
+#ifdef HAS_ARGBTORGB24ROW_NEON
+void ARGBToRGB24Row_NEON(const uint8* src_argb, uint8* dst_rgb24, int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v1.8b,v2.8b,v3.8b,v4.8b}, [%0], #32 \n" // load 8 ARGB pixels
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ MEMACCESS(1)
+ "st3 {v1.8b,v2.8b,v3.8b}, [%1], #24 \n" // store 8 pixels of RGB24.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_rgb24), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v1", "v2", "v3", "v4" // Clobber List
+ );
+}
+#endif // HAS_ARGBTORGB24ROW_NEON
+
+#ifdef HAS_ARGBTORAWROW_NEON
+void ARGBToRAWRow_NEON(const uint8* src_argb, uint8* dst_raw, int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v1.8b,v2.8b,v3.8b,v4.8b}, [%0], #32 \n" // load b g r a
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "orr v4.8b, v2.8b, v2.8b \n" // mov g
+ "orr v5.8b, v1.8b, v1.8b \n" // mov b
+ MEMACCESS(1)
+ "st3 {v3.8b,v4.8b,v5.8b}, [%1], #24 \n" // store r g b
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_raw), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v1", "v2", "v3", "v4", "v5" // Clobber List
+ );
+}
+#endif // HAS_ARGBTORAWROW_NEON
+
+#ifdef HAS_YUY2TOYROW_NEON
+void YUY2ToYRow_NEON(const uint8* src_yuy2, uint8* dst_y, int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld2 {v0.16b,v1.16b}, [%0], #32 \n" // load 16 pixels of YUY2.
+ "subs %w2, %w2, #16 \n" // 16 processed per loop.
+ MEMACCESS(1)
+ "st1 {v0.16b}, [%1], #16 \n" // store 16 pixels of Y.
+ "b.gt 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1" // Clobber List
+ );
+}
+#endif // HAS_YUY2TOYROW_NEON
+
+#ifdef HAS_UYVYTOYROW_NEON
+void UYVYToYRow_NEON(const uint8* src_uyvy, uint8* dst_y, int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld2 {v0.16b,v1.16b}, [%0], #32 \n" // load 16 pixels of UYVY.
+ "subs %w2, %w2, #16 \n" // 16 processed per loop.
+ MEMACCESS(1)
+ "st1 {v1.16b}, [%1], #16 \n" // store 16 pixels of Y.
+ "b.gt 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1" // Clobber List
+ );
+}
+#endif // HAS_UYVYTOYROW_NEON
+
+#ifdef HAS_YUY2TOUV422ROW_NEON
+void YUY2ToUV422Row_NEON(const uint8* src_yuy2, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 16 YUY2 pixels
+ "subs %w3, %w3, #16 \n" // 16 pixels = 8 UVs.
+ MEMACCESS(1)
+ "st1 {v1.8b}, [%1], #8 \n" // store 8 U.
+ MEMACCESS(2)
+ "st1 {v3.8b}, [%2], #8 \n" // store 8 V.
+ "b.gt 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+#endif // HAS_YUY2TOUV422ROW_NEON
+
+#ifdef HAS_UYVYTOUV422ROW_NEON
+void UYVYToUV422Row_NEON(const uint8* src_uyvy, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 16 UYVY pixels
+ "subs %w3, %w3, #16 \n" // 16 pixels = 8 UVs.
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 U.
+ MEMACCESS(2)
+ "st1 {v2.8b}, [%2], #8 \n" // store 8 V.
+ "b.gt 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+#endif // HAS_UYVYTOUV422ROW_NEON
+
+#ifdef HAS_YUY2TOUVROW_NEON
+void YUY2ToUVRow_NEON(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_yuy2b = src_yuy2 + stride_yuy2;
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 16 pixels
+ "subs %w4, %w4, #16 \n" // 16 pixels = 8 UVs.
+ MEMACCESS(1)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%1], #32 \n" // load next row
+ "urhadd v1.8b, v1.8b, v5.8b \n" // average rows of U
+ "urhadd v3.8b, v3.8b, v7.8b \n" // average rows of V
+ MEMACCESS(2)
+ "st1 {v1.8b}, [%2], #8 \n" // store 8 U.
+ MEMACCESS(3)
+ "st1 {v3.8b}, [%3], #8 \n" // store 8 V.
+ "b.gt 1b \n"
+ : "+r"(src_yuy2), // %0
+ "+r"(src_yuy2b), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4",
+ "v5", "v6", "v7" // Clobber List
+ );
+}
+#endif // HAS_YUY2TOUVROW_NEON
+
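+// The vertical chroma averaging above in scalar form (sketch only):
+// urhadd is a per-byte rounding halving add.
+static uint8 RoundHalvingAdd_Sketch(uint8 a, uint8 b) {
+  return (uint8)((a + b + 1) >> 1);
+}
+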
+#ifdef HAS_UYVYTOUVROW_NEON
+void UYVYToUVRow_NEON(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_uyvyb = src_uyvy + stride_uyvy;
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 16 pixels
+ "subs %w4, %w4, #16 \n" // 16 pixels = 8 UVs.
+ MEMACCESS(1)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%1], #32 \n" // load next row
+ "urhadd v0.8b, v0.8b, v4.8b \n" // average rows of U
+ "urhadd v2.8b, v2.8b, v6.8b \n" // average rows of V
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 U.
+ MEMACCESS(3)
+ "st1 {v2.8b}, [%3], #8 \n" // store 8 V.
+ "b.gt 1b \n"
+ : "+r"(src_uyvy), // %0
+ "+r"(src_uyvyb), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4",
+ "v5", "v6", "v7" // Clobber List
+ );
+}
+#endif // HAS_UYVYTOUVROW_NEON
+
+// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
+#ifdef HAS_ARGBSHUFFLEROW_NEON
+void ARGBShuffleRow_NEON(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ asm volatile (
+ MEMACCESS(3)
+ "ld1 {v2.16b}, [%3] \n" // shuffler
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 4 pixels.
+ "subs %w2, %w2, #4 \n" // 4 processed per loop
+ "tbl v1.16b, {v0.16b}, v2.16b \n" // look up 4 pixels
+ MEMACCESS(1)
+ "st1 {v1.16b}, [%1], #16 \n" // store 4.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(pix) // %2
+ : "r"(shuffler) // %3
+ : "cc", "memory", "v0", "v1", "v2" // Clobber List
+ );
+}
+#endif // HAS_ARGBSHUFFLEROW_NEON
+
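+// Usage note (illustrative, not part of the patch): the 16-byte shuffler is
+// a tbl index table covering 4 ARGB pixels. For example, a table that swaps
+// the B and R channels of every pixel could look like:
+//   static const uint8 kShuffleSwapRB[16] =
+//       {2, 1, 0, 3, 6, 5, 4, 7, 10, 9, 8, 11, 14, 13, 12, 15};
+//   ARGBShuffleRow_NEON(src_argb, dst_argb, kShuffleSwapRB, width);
+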
+#ifdef HAS_I422TOYUY2ROW_NEON
+void I422ToYUY2Row_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_yuy2, int width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld2 {v0.8b, v1.8b}, [%0], #16 \n" // load 16 Ys
+ "orr v2.8b, v1.8b, v1.8b \n"
+ MEMACCESS(1)
+ "ld1 {v1.8b}, [%1], #8 \n" // load 8 Us
+ MEMACCESS(2)
+ "ld1 {v3.8b}, [%2], #8 \n" // load 8 Vs
+ "subs %w4, %w4, #16 \n" // 16 pixels
+ MEMACCESS(3)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%3], #32 \n" // Store 16 pixels.
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_yuy2), // %3
+ "+r"(width) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3"
+ );
+}
+#endif // HAS_I422TOYUY2ROW_NEON
+
+#ifdef HAS_I422TOUYVYROW_NEON
+void I422ToUYVYRow_NEON(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_uyvy, int width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld2 {v1.8b,v2.8b}, [%0], #16 \n" // load 16 Ys
+ "orr v3.8b, v2.8b, v2.8b \n"
+ MEMACCESS(1)
+ "ld1 {v0.8b}, [%1], #8 \n" // load 8 Us
+ MEMACCESS(2)
+ "ld1 {v2.8b}, [%2], #8 \n" // load 8 Vs
+ "subs %w4, %w4, #16 \n" // 16 pixels
+ MEMACCESS(3)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%3], #32 \n" // Store 16 pixels.
+ "b.gt 1b \n"
+ : "+r"(src_y), // %0
+ "+r"(src_u), // %1
+ "+r"(src_v), // %2
+ "+r"(dst_uyvy), // %3
+ "+r"(width) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3"
+ );
+}
+#endif // HAS_I422TOUYVYROW_NEON
+
+#ifdef HAS_ARGBTORGB565ROW_NEON
+void ARGBToRGB565Row_NEON(const uint8* src_argb, uint8* dst_rgb565, int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%0], #32 \n" // load 8 pixels
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ ARGBTORGB565
+ MEMACCESS(1)
+ "st1 {v0.16b}, [%1], #16 \n" // store 8 pixels RGB565.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_rgb565), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v20", "v21", "v22", "v23"
+ );
+}
+#endif // HAS_ARGBTORGB565ROW_NEON
+
+#ifdef HAS_ARGBTORGB565DITHERROW_NEON
+void ARGBToRGB565DitherRow_NEON(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int width) {
+ asm volatile (
+ "dup v1.4s, %w2 \n" // dither4
+ "1: \n"
+ MEMACCESS(1)
+ "ld4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%1], #32 \n" // load 8 pixels
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "uqadd v20.8b, v20.8b, v1.8b \n"
+ "uqadd v21.8b, v21.8b, v1.8b \n"
+ "uqadd v22.8b, v22.8b, v1.8b \n"
+ ARGBTORGB565
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n" // store 8 pixels RGB565.
+ "b.gt 1b \n"
+ : "+r"(dst_rgb) // %0
+ : "r"(src_argb), // %1
+ "r"(dither4), // %2
+ "r"(width) // %3
+ : "cc", "memory", "v0", "v1", "v20", "v21", "v22", "v23"
+ );
+}
+#endif // HAS_ARGBTORGB565DITHERROW_NEON
+
+#ifdef HAS_ARGBTOARGB1555ROW_NEON
+void ARGBToARGB1555Row_NEON(const uint8* src_argb, uint8* dst_argb1555,
+ int pix) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%0], #32 \n" // load 8 pixels
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ ARGBTOARGB1555
+ MEMACCESS(1)
+ "st1 {v0.16b}, [%1], #16 \n" // store 8 pixels ARGB1555.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb1555), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v20", "v21", "v22", "v23"
+ );
+}
+#endif // HAS_ARGBTOARGB1555ROW_NEON
+
+#ifdef HAS_ARGBTOARGB4444ROW_NEON
+void ARGBToARGB4444Row_NEON(const uint8* src_argb, uint8* dst_argb4444,
+ int pix) {
+ asm volatile (
+ "movi v4.16b, #0x0f \n" // bits to clear with vbic.
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v20.8b,v21.8b,v22.8b,v23.8b}, [%0], #32 \n" // load 8 pixels
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ ARGBTOARGB4444
+ MEMACCESS(1)
+ "st1 {v0.16b}, [%1], #16 \n" // store 8 pixels ARGB4444.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb4444), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v4", "v20", "v21", "v22", "v23"
+ );
+}
+#endif // HAS_ARGBTOARGB4444ROW_NEON
+
+#ifdef HAS_ARGBTOYROW_NEON
+void ARGBToYRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v4.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v5.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v6.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v7.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v3.8h, v0.8b, v4.8b \n" // B
+ "umlal v3.8h, v1.8b, v5.8b \n" // G
+ "umlal v3.8h, v2.8b, v6.8b \n" // R
+ "sqrshrun v0.8b, v3.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v7.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
+ );
+}
+#endif // HAS_ARGBTOYROW_NEON
+
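+// The fixed-point luma math above as scalar C (a sketch, not the shipped
+// path): coefficients 13/65/33 are Q7 approximations of the BT.601 weights,
+// sqrshrun #7 is a rounding narrow, and the +16 offset is applied last.
+static uint8 ARGBToY_Sketch(uint8 b, uint8 g, uint8 r) {
+  return (uint8)(((13 * b + 65 * g + 33 * r + 64) >> 7) + 16);
+}
+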
+#ifdef HAS_ARGBTOYJROW_NEON
+void ARGBToYJRow_NEON(const uint8* src_argb, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v4.8b, #15 \n" // B * 0.11400 coefficient
+ "movi v5.8b, #75 \n" // G * 0.58700 coefficient
+ "movi v6.8b, #38 \n" // R * 0.29900 coefficient
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v3.8h, v0.8b, v4.8b \n" // B
+ "umlal v3.8h, v1.8b, v5.8b \n" // G
+ "umlal v3.8h, v2.8b, v6.8b \n" // R
+ "sqrshrun v0.8b, v3.8h, #7 \n" // 15 bit to 8 bit Y
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6"
+ );
+}
+#endif // HAS_ARGBTOYJROW_NEON
+
+// 8x1 pixels -> 8x1 U, V: full-resolution chroma, no subsampling.
+#ifdef HAS_ARGBTOUV444ROW_NEON
+void ARGBToUV444Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ "movi v24.8b, #112 \n" // UB / VR 0.875 coefficient
+ "movi v25.8b, #74 \n" // UG -0.5781 coefficient
+ "movi v26.8b, #38 \n" // UR -0.2969 coefficient
+ "movi v27.8b, #18 \n" // VB -0.1406 coefficient
+ "movi v28.8b, #94 \n" // VG -0.7344 coefficient
+ "movi v29.16b,#0x80 \n" // 128.5
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB pixels.
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "umull v4.8h, v0.8b, v24.8b \n" // B
+ "umlsl v4.8h, v1.8b, v25.8b \n" // G
+ "umlsl v4.8h, v2.8b, v26.8b \n" // R
+ "add v4.8h, v4.8h, v29.8h \n" // +128 -> unsigned
+
+ "umull v3.8h, v2.8b, v24.8b \n" // R
+ "umlsl v3.8h, v1.8b, v28.8b \n" // G
+ "umlsl v3.8h, v0.8b, v27.8b \n" // B
+ "add v3.8h, v3.8h, v29.8h \n" // +128 -> unsigned
+
+ "uqshrn v0.8b, v4.8h, #8 \n" // 16 bit to 8 bit U
+ "uqshrn v1.8b, v3.8h, #8 \n" // 16 bit to 8 bit V
+
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels U.
+ MEMACCESS(2)
+ "st1 {v1.8b}, [%2], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4",
+ "v24", "v25", "v26", "v27", "v28", "v29"
+ );
+}
+#endif // HAS_ARGBTOUV444ROW_NEON
+
+// 16x1 pixels -> 8x1. pix is number of argb pixels. e.g. 16.
+#ifdef HAS_ARGBTOUV422ROW_NEON
+void ARGBToUV422Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels.
+
+ "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts.
+
+ "subs %w3, %w3, #16 \n" // 16 processed per loop.
+ "mul v3.8h, v0.8h, v20.8h \n" // B
+ "mls v3.8h, v1.8h, v21.8h \n" // G
+ "mls v3.8h, v2.8h, v22.8h \n" // R
+ "add v3.8h, v3.8h, v25.8h \n" // +128 -> unsigned
+
+ "mul v4.8h, v2.8h, v20.8h \n" // R
+ "mls v4.8h, v1.8h, v24.8h \n" // G
+ "mls v4.8h, v0.8h, v23.8h \n" // B
+ "add v4.8h, v4.8h, v25.8h \n" // +128 -> unsigned
+
+ "uqshrn v0.8b, v3.8h, #8 \n" // 16 bit to 8 bit U
+ "uqshrn v1.8b, v4.8h, #8 \n" // 16 bit to 8 bit V
+
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels U.
+ MEMACCESS(2)
+ "st1 {v1.8b}, [%2], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_ARGBTOUV422ROW_NEON
+
+// 32x1 pixels -> 8x1. pix is number of argb pixels. e.g. 32.
+#ifdef HAS_ARGBTOUV411ROW_NEON
+void ARGBToUV411Row_NEON(const uint8* src_argb, uint8* dst_u, uint8* dst_v,
+ int pix) {
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels.
+ "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(0)
+ "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%0], #64 \n" // load next 16.
+ "uaddlp v4.8h, v4.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v5.8h, v5.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v6.8h, v6.16b \n" // R 16 bytes -> 8 shorts.
+
+ "addp v0.8h, v0.8h, v4.8h \n" // B 16 shorts -> 8 shorts.
+ "addp v1.8h, v1.8h, v5.8h \n" // G 16 shorts -> 8 shorts.
+ "addp v2.8h, v2.8h, v6.8h \n" // R 16 shorts -> 8 shorts.
+
+ "urshr v0.8h, v0.8h, #1 \n" // 2x average
+ "urshr v1.8h, v1.8h, #1 \n"
+ "urshr v2.8h, v2.8h, #1 \n"
+
+ "subs %w3, %w3, #32 \n" // 32 processed per loop.
+ "mul v3.8h, v0.8h, v20.8h \n" // B
+ "mls v3.8h, v1.8h, v21.8h \n" // G
+ "mls v3.8h, v2.8h, v22.8h \n" // R
+ "add v3.8h, v3.8h, v25.8h \n" // +128 -> unsigned
+ "mul v4.8h, v2.8h, v20.8h \n" // R
+ "mls v4.8h, v1.8h, v24.8h \n" // G
+ "mls v4.8h, v0.8h, v23.8h \n" // B
+ "add v4.8h, v4.8h, v25.8h \n" // +128 -> unsigned
+ "uqshrn v0.8b, v3.8h, #8 \n" // 16 bit to 8 bit U
+ "uqshrn v1.8b, v4.8h, #8 \n" // 16 bit to 8 bit V
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels U.
+ MEMACCESS(2)
+ "st1 {v1.8b}, [%2], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_u), // %1
+ "+r"(dst_v), // %2
+ "+r"(pix) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_ARGBTOUV411ROW_NEON
+
+// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16.
+#define RGBTOUV(QB, QG, QR) \
+ "mul v3.8h, " #QB ",v20.8h \n" /* B */ \
+ "mul v4.8h, " #QR ",v20.8h \n" /* R */ \
+ "mls v3.8h, " #QG ",v21.8h \n" /* G */ \
+ "mls v4.8h, " #QG ",v24.8h \n" /* G */ \
+ "mls v3.8h, " #QR ",v22.8h \n" /* R */ \
+ "mls v4.8h, " #QB ",v23.8h \n" /* B */ \
+ "add v3.8h, v3.8h, v25.8h \n" /* +128 -> unsigned */ \
+ "add v4.8h, v4.8h, v25.8h \n" /* +128 -> unsigned */ \
+ "uqshrn v0.8b, v3.8h, #8 \n" /* 16 bit to 8 bit U */ \
+ "uqshrn v1.8b, v4.8h, #8 \n" /* 16 bit to 8 bit V */
+
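+// Scalar form of the RGBTOUV macro (sketch only; the halved Q7 coefficients
+// shown match the ones loaded explicitly in RGB565ToUVRow_NEON below).
+// Inputs are per-channel sums of a 2x2 block after one rounding halving;
+// outputs are chroma biased by 128 (0x8080 in 8.8 fixed point).
+static void RGBToUV_Sketch(int b, int g, int r, uint8* u, uint8* v) {
+  *u = (uint8)((56 * b - 37 * g - 19 * r + 0x8080) >> 8);
+  *v = (uint8)((56 * r - 47 * g - 9 * b + 0x8080) >> 8);
+  // The uqshrn #8 above additionally saturates the result to [0, 255].
+}
+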
+// TODO(fbarchard): Consider vhadd vertical, then vpaddl horizontal, avoid shr.
+// TODO(fbarchard): consider ptrdiff_t for all strides.
+
+#ifdef HAS_ARGBTOUVROW_NEON
+void ARGBToUVRow_NEON(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_argb_1 = src_argb + src_stride_argb;
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels.
+ "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts.
+
+ MEMACCESS(1)
+ "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%1], #64 \n" // load next 16
+ "uadalp v0.8h, v4.16b \n" // B 16 bytes -> 8 shorts.
+ "uadalp v1.8h, v5.16b \n" // G 16 bytes -> 8 shorts.
+ "uadalp v2.8h, v6.16b \n" // R 16 bytes -> 8 shorts.
+
+ "urshr v0.8h, v0.8h, #1 \n" // 2x average
+ "urshr v1.8h, v1.8h, #1 \n"
+ "urshr v2.8h, v2.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 32 processed per loop.
+ RGBTOUV(v0.8h, v1.8h, v2.8h)
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_argb_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_ARGBTOUVROW_NEON
+
+// TODO(fbarchard): Subsample match C code.
+#ifdef HAS_ARGBTOUVJROW_NEON
+void ARGBToUVJRow_NEON(const uint8* src_argb, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_argb_1 = src_argb + src_stride_argb;
+ asm volatile (
+ "movi v20.8h, #63, lsl #0 \n" // UB/VR coeff (0.500) / 2
+ "movi v21.8h, #42, lsl #0 \n" // UG coeff (-0.33126) / 2
+ "movi v22.8h, #21, lsl #0 \n" // UR coeff (-0.16874) / 2
+ "movi v23.8h, #10, lsl #0 \n" // VB coeff (-0.08131) / 2
+ "movi v24.8h, #53, lsl #0 \n" // VG coeff (-0.41869) / 2
+ "movi v25.16b, #0x80 \n" // 128.5 (0x8080 in 16-bit)
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels.
+ "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%1], #64 \n" // load next 16
+ "uadalp v0.8h, v4.16b \n" // B 16 bytes -> 8 shorts.
+ "uadalp v1.8h, v5.16b \n" // G 16 bytes -> 8 shorts.
+ "uadalp v2.8h, v6.16b \n" // R 16 bytes -> 8 shorts.
+
+ "urshr v0.8h, v0.8h, #1 \n" // 2x average
+ "urshr v1.8h, v1.8h, #1 \n"
+ "urshr v2.8h, v2.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 32 processed per loop.
+ RGBTOUV(v0.8h, v1.8h, v2.8h)
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_argb_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_ARGBTOUVJROW_NEON
+
+#ifdef HAS_BGRATOUVROW_NEON
+void BGRAToUVRow_NEON(const uint8* src_bgra, int src_stride_bgra,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_bgra_1 = src_bgra + src_stride_bgra;
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels.
+ "uaddlp v0.8h, v3.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v3.8h, v2.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v1.16b \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%1], #64 \n" // load 16 more
+ "uadalp v0.8h, v7.16b \n" // B 16 bytes -> 8 shorts.
+ "uadalp v3.8h, v6.16b \n" // G 16 bytes -> 8 shorts.
+ "uadalp v2.8h, v5.16b \n" // R 16 bytes -> 8 shorts.
+
+ "urshr v0.8h, v0.8h, #1 \n" // 2x average
+ "urshr v1.8h, v3.8h, #1 \n"
+ "urshr v2.8h, v2.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 32 processed per loop.
+ RGBTOUV(v0.8h, v1.8h, v2.8h)
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_bgra), // %0
+ "+r"(src_bgra_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_BGRATOUVROW_NEON
+
+#ifdef HAS_ABGRTOUVROW_NEON
+void ABGRToUVRow_NEON(const uint8* src_abgr, int src_stride_abgr,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_abgr_1 = src_abgr + src_stride_abgr;
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels.
+ "uaddlp v3.8h, v2.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v0.16b \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%1], #64 \n" // load 16 more.
+ "uadalp v3.8h, v6.16b \n" // B 16 bytes -> 8 shorts.
+ "uadalp v2.8h, v5.16b \n" // G 16 bytes -> 8 shorts.
+ "uadalp v1.8h, v4.16b \n" // R 16 bytes -> 8 shorts.
+
+ "urshr v0.8h, v3.8h, #1 \n" // 2x average
+ "urshr v2.8h, v2.8h, #1 \n"
+ "urshr v1.8h, v1.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 32 processed per loop.
+ RGBTOUV(v0.8h, v2.8h, v1.8h)
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_abgr), // %0
+ "+r"(src_abgr_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_ABGRTOUVROW_NEON
+
+#ifdef HAS_RGBATOUVROW_NEON
+void RGBAToUVRow_NEON(const uint8* src_rgba, int src_stride_rgba,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_rgba_1 = src_rgba + src_stride_rgba;
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 16 pixels.
+ "uaddlp v0.8h, v1.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v2.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v3.16b \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "ld4 {v4.16b,v5.16b,v6.16b,v7.16b}, [%1], #64 \n" // load 16 more.
+ "uadalp v0.8h, v5.16b \n" // B 16 bytes -> 8 shorts.
+ "uadalp v1.8h, v6.16b \n" // G 16 bytes -> 8 shorts.
+ "uadalp v2.8h, v7.16b \n" // R 16 bytes -> 8 shorts.
+
+ "urshr v0.8h, v0.8h, #1 \n" // 2x average
+ "urshr v1.8h, v1.8h, #1 \n"
+ "urshr v2.8h, v2.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 32 processed per loop.
+ RGBTOUV(v0.8h, v1.8h, v2.8h)
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_rgba), // %0
+ "+r"(src_rgba_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_RGBATOUVROW_NEON
+
+#ifdef HAS_RGB24TOUVROW_NEON
+void RGB24ToUVRow_NEON(const uint8* src_rgb24, int src_stride_rgb24,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_rgb24_1 = src_rgb24 + src_stride_rgb24;
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld3 {v0.16b,v1.16b,v2.16b}, [%0], #48 \n" // load 16 pixels.
+ "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "ld3 {v4.16b,v5.16b,v6.16b}, [%1], #48 \n" // load 16 more.
+ "uadalp v0.8h, v4.16b \n" // B 16 bytes -> 8 shorts.
+ "uadalp v1.8h, v5.16b \n" // G 16 bytes -> 8 shorts.
+ "uadalp v2.8h, v6.16b \n" // R 16 bytes -> 8 shorts.
+
+ "urshr v0.8h, v0.8h, #1 \n" // 2x average
+ "urshr v1.8h, v1.8h, #1 \n"
+ "urshr v2.8h, v2.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 32 processed per loop.
+ RGBTOUV(v0.8h, v1.8h, v2.8h)
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_rgb24), // %0
+ "+r"(src_rgb24_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_RGB24TOUVROW_NEON
+
+#ifdef HAS_RAWTOUVROW_NEON
+void RAWToUVRow_NEON(const uint8* src_raw, int src_stride_raw,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_raw_1 = src_raw + src_stride_raw;
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld3 {v0.16b,v1.16b,v2.16b}, [%0], #48 \n" // load 8 RAW pixels.
+ "uaddlp v2.8h, v2.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v0.8h, v0.16b \n" // R 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "ld3 {v4.16b,v5.16b,v6.16b}, [%1], #48 \n" // load 8 more RAW pixels
+ "uadalp v2.8h, v6.16b \n" // B 16 bytes -> 8 shorts.
+ "uadalp v1.8h, v5.16b \n" // G 16 bytes -> 8 shorts.
+ "uadalp v0.8h, v4.16b \n" // R 16 bytes -> 8 shorts.
+
+ "urshr v2.8h, v2.8h, #1 \n" // 2x average
+ "urshr v1.8h, v1.8h, #1 \n"
+ "urshr v0.8h, v0.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 32 processed per loop.
+ RGBTOUV(v2.8h, v1.8h, v0.8h)
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_raw), // %0
+ "+r"(src_raw_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_RAWTOUVROW_NEON
+
+// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16.
+#ifdef HAS_RGB565TOUVROW_NEON
+void RGB565ToUVRow_NEON(const uint8* src_rgb565, int src_stride_rgb565,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_rgb565_1 = src_rgb565 + src_stride_rgb565;
+ asm volatile (
+ "movi v22.8h, #56, lsl #0 \n" // UB / VR coeff (0.875) / 2
+ "movi v23.8h, #37, lsl #0 \n" // UG coeff (-0.5781) / 2
+ "movi v24.8h, #19, lsl #0 \n" // UR coeff (-0.2969) / 2
+ "movi v25.8h, #9 , lsl #0 \n" // VB coeff (-0.1406) / 2
+ "movi v26.8h, #47, lsl #0 \n" // VG coeff (-0.7344) / 2
+ "movi v27.16b, #0x80 \n" // 128.5 (0x8080 in 16-bit)
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 RGB565 pixels.
+ RGB565TOARGB
+ "uaddlp v16.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uaddlp v18.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uaddlp v20.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // next 8 RGB565 pixels.
+ RGB565TOARGB
+ "uaddlp v17.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uaddlp v19.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uaddlp v21.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n" // load 8 RGB565 pixels.
+ RGB565TOARGB
+ "uadalp v16.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uadalp v18.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uadalp v20.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n" // next 8 RGB565 pixels.
+ RGB565TOARGB
+ "uadalp v17.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uadalp v19.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uadalp v21.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+
+ "ins v16.D[1], v17.D[0] \n"
+ "ins v18.D[1], v19.D[0] \n"
+ "ins v20.D[1], v21.D[0] \n"
+
+ "urshr v4.8h, v16.8h, #1 \n" // 2x average
+ "urshr v5.8h, v18.8h, #1 \n"
+ "urshr v6.8h, v20.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 16 processed per loop.
+ "mul v16.8h, v4.8h, v22.8h \n" // B
+ "mls v16.8h, v5.8h, v23.8h \n" // G
+ "mls v16.8h, v6.8h, v24.8h \n" // R
+ "add v16.8h, v16.8h, v27.8h \n" // +128 -> unsigned
+ "mul v17.8h, v6.8h, v22.8h \n" // R
+ "mls v17.8h, v5.8h, v26.8h \n" // G
+ "mls v17.8h, v4.8h, v25.8h \n" // B
+ "add v17.8h, v17.8h, v27.8h \n" // +128 -> unsigned
+ "uqshrn v0.8b, v16.8h, #8 \n" // 16 bit to 8 bit U
+ "uqshrn v1.8b, v17.8h, #8 \n" // 16 bit to 8 bit V
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_rgb565), // %0
+ "+r"(src_rgb565_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24",
+ "v25", "v26", "v27"
+ );
+}
+#endif // HAS_RGB565TOUVROW_NEON
+
+// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16.
+#ifdef HAS_ARGB1555TOUVROW_NEON
+void ARGB1555ToUVRow_NEON(const uint8* src_argb1555, int src_stride_argb1555,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_argb1555_1 = src_argb1555 + src_stride_argb1555;
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 ARGB1555 pixels.
+ RGB555TOARGB
+ "uaddlp v16.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uaddlp v17.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uaddlp v18.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // next 8 ARGB1555 pixels.
+ RGB555TOARGB
+ "uaddlp v26.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uaddlp v27.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uaddlp v28.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n" // load 8 ARGB1555 pixels.
+ RGB555TOARGB
+ "uadalp v16.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uadalp v17.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uadalp v18.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n" // next 8 ARGB1555 pixels.
+ RGB555TOARGB
+ "uadalp v26.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uadalp v27.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uadalp v28.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+
+ "ins v16.D[1], v26.D[0] \n"
+ "ins v17.D[1], v27.D[0] \n"
+ "ins v18.D[1], v28.D[0] \n"
+
+ "urshr v4.8h, v16.8h, #1 \n" // 2x average
+ "urshr v5.8h, v17.8h, #1 \n"
+ "urshr v6.8h, v18.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 16 processed per loop.
+ "mul v2.8h, v4.8h, v20.8h \n" // B
+ "mls v2.8h, v5.8h, v21.8h \n" // G
+ "mls v2.8h, v6.8h, v22.8h \n" // R
+ "add v2.8h, v2.8h, v25.8h \n" // +128 -> unsigned
+ "mul v3.8h, v6.8h, v20.8h \n" // R
+ "mls v3.8h, v5.8h, v24.8h \n" // G
+ "mls v3.8h, v4.8h, v23.8h \n" // B
+ "add v3.8h, v3.8h, v25.8h \n" // +128 -> unsigned
+ "uqshrn v0.8b, v2.8h, #8 \n" // 16 bit to 8 bit U
+ "uqshrn v1.8b, v3.8h, #8 \n" // 16 bit to 8 bit V
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_argb1555), // %0
+ "+r"(src_argb1555_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
+ "v26", "v27", "v28"
+ );
+}
+#endif // HAS_ARGB1555TOUVROW_NEON
+
+// 16x2 pixels -> 8x1. pix is number of argb pixels. e.g. 16.
+#ifdef HAS_ARGB4444TOUVROW_NEON
+void ARGB4444ToUVRow_NEON(const uint8* src_argb4444, int src_stride_argb4444,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ const uint8* src_argb4444_1 = src_argb4444 + src_stride_argb4444;
+ asm volatile (
+ RGBTOUV_SETUP_REG
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 ARGB4444 pixels.
+ ARGB4444TOARGB
+ "uaddlp v16.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uaddlp v17.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uaddlp v18.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // next 8 ARGB4444 pixels.
+ ARGB4444TOARGB
+ "uaddlp v26.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uaddlp v27.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uaddlp v28.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n" // load 8 ARGB4444 pixels.
+ ARGB4444TOARGB
+ "uadalp v16.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uadalp v17.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uadalp v18.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n" // next 8 ARGB4444 pixels.
+ ARGB4444TOARGB
+ "uadalp v26.4h, v0.8b \n" // B 8 bytes -> 4 shorts.
+ "uadalp v27.4h, v1.8b \n" // G 8 bytes -> 4 shorts.
+ "uadalp v28.4h, v2.8b \n" // R 8 bytes -> 4 shorts.
+
+ "ins v16.D[1], v26.D[0] \n"
+ "ins v17.D[1], v27.D[0] \n"
+ "ins v18.D[1], v28.D[0] \n"
+
+ "urshr v4.8h, v16.8h, #1 \n" // 2x average
+ "urshr v5.8h, v17.8h, #1 \n"
+ "urshr v6.8h, v18.8h, #1 \n"
+
+ "subs %w4, %w4, #16 \n" // 16 processed per loop.
+ "mul v2.8h, v4.8h, v20.8h \n" // B
+ "mls v2.8h, v5.8h, v21.8h \n" // G
+ "mls v2.8h, v6.8h, v22.8h \n" // R
+ "add v2.8h, v2.8h, v25.8h \n" // +128 -> unsigned
+ "mul v3.8h, v6.8h, v20.8h \n" // R
+ "mls v3.8h, v5.8h, v24.8h \n" // G
+ "mls v3.8h, v4.8h, v23.8h \n" // B
+ "add v3.8h, v3.8h, v25.8h \n" // +128 -> unsigned
+ "uqshrn v0.8b, v2.8h, #8 \n" // 16 bit to 8 bit U
+ "uqshrn v1.8b, v3.8h, #8 \n" // 16 bit to 8 bit V
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 pixels U.
+ MEMACCESS(3)
+ "st1 {v1.8b}, [%3], #8 \n" // store 8 pixels V.
+ "b.gt 1b \n"
+ : "+r"(src_argb4444), // %0
+ "+r"(src_argb4444_1), // %1
+ "+r"(dst_u), // %2
+ "+r"(dst_v), // %3
+ "+r"(pix) // %4
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6",
+ "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25",
+ "v26", "v27", "v28"
+ );
+}
+#endif // HAS_ARGB4444TOUVROW_NEON
+
+#ifdef HAS_RGB565TOYROW_NEON
+void RGB565ToYRow_NEON(const uint8* src_rgb565, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v24.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v25.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v26.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v27.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 RGB565 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ RGB565TOARGB
+ "umull v3.8h, v0.8b, v24.8b \n" // B
+ "umlal v3.8h, v1.8b, v25.8b \n" // G
+ "umlal v3.8h, v2.8b, v26.8b \n" // R
+ "sqrshrun v0.8b, v3.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v27.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_rgb565), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v6",
+ "v24", "v25", "v26", "v27"
+ );
+}
+#endif // HAS_RGB565TOYROW_NEON
+
+#ifdef HAS_ARGB1555TOYROW_NEON
+void ARGB1555ToYRow_NEON(const uint8* src_argb1555, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v4.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v5.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v6.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v7.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 ARGB1555 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ ARGB1555TOARGB
+ "umull v3.8h, v0.8b, v4.8b \n" // B
+ "umlal v3.8h, v1.8b, v5.8b \n" // G
+ "umlal v3.8h, v2.8b, v6.8b \n" // R
+ "sqrshrun v0.8b, v3.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v7.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_argb1555), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
+ );
+}
+#endif // HAS_ARGB1555TOYROW_NEON
+
+#ifdef HAS_ARGB4444TOYROW_NEON
+void ARGB4444ToYRow_NEON(const uint8* src_argb4444, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v24.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v25.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v26.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v27.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 8 ARGB4444 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ ARGB4444TOARGB
+ "umull v3.8h, v0.8b, v24.8b \n" // B
+ "umlal v3.8h, v1.8b, v25.8b \n" // G
+ "umlal v3.8h, v2.8b, v26.8b \n" // R
+ "sqrshrun v0.8b, v3.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v27.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_argb4444), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v24", "v25", "v26", "v27"
+ );
+}
+#endif // HAS_ARGB4444TOYROW_NEON
+
+#ifdef HAS_BGRATOYROW_NEON
+void BGRAToYRow_NEON(const uint8* src_bgra, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v4.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v5.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v6.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v7.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v16.8h, v1.8b, v4.8b \n" // R
+ "umlal v16.8h, v2.8b, v5.8b \n" // G
+ "umlal v16.8h, v3.8b, v6.8b \n" // B
+ "sqrshrun v0.8b, v16.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v7.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_bgra), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"
+ );
+}
+#endif // HAS_BGRATOYROW_NEON
+
+#ifdef HAS_ABGRTOYROW_NEON
+void ABGRToYRow_NEON(const uint8* src_abgr, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v4.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v5.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v6.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v7.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v16.8h, v0.8b, v4.8b \n" // R
+ "umlal v16.8h, v1.8b, v5.8b \n" // G
+ "umlal v16.8h, v2.8b, v6.8b \n" // B
+ "sqrshrun v0.8b, v16.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v7.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_abgr), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"
+ );
+}
+#endif // HAS_ABGRTOYROW_NEON
+
+#ifdef HAS_RGBATOYROW_NEON
+void RGBAToYRow_NEON(const uint8* src_rgba, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v4.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v5.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v6.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v7.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v16.8h, v1.8b, v4.8b \n" // B
+ "umlal v16.8h, v2.8b, v5.8b \n" // G
+ "umlal v16.8h, v3.8b, v6.8b \n" // R
+ "sqrshrun v0.8b, v16.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v7.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_rgba), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"
+ );
+}
+#endif // HAS_RGBATOYROW_NEON
+
+#ifdef HAS_RGB24TOYROW_NEON
+void RGB24ToYRow_NEON(const uint8* src_rgb24, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v4.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v5.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v6.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v7.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld3 {v0.8b,v1.8b,v2.8b}, [%0], #24 \n" // load 8 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v16.8h, v0.8b, v4.8b \n" // B
+ "umlal v16.8h, v1.8b, v5.8b \n" // G
+ "umlal v16.8h, v2.8b, v6.8b \n" // R
+ "sqrshrun v0.8b, v16.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v7.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_rgb24), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"
+ );
+}
+#endif // HAS_RGB24TOYROW_NEON
+
+#ifdef HAS_RAWTOYROW_NEON
+void RAWToYRow_NEON(const uint8* src_raw, uint8* dst_y, int pix) {
+ asm volatile (
+ "movi v4.8b, #33 \n" // R * 0.2578 coefficient
+ "movi v5.8b, #65 \n" // G * 0.5078 coefficient
+ "movi v6.8b, #13 \n" // B * 0.1016 coefficient
+ "movi v7.8b, #16 \n" // Add 16 constant
+ "1: \n"
+ MEMACCESS(0)
+ "ld3 {v0.8b,v1.8b,v2.8b}, [%0], #24 \n" // load 8 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v16.8h, v0.8b, v4.8b \n" // B
+ "umlal v16.8h, v1.8b, v5.8b \n" // G
+ "umlal v16.8h, v2.8b, v6.8b \n" // R
+ "sqrshrun v0.8b, v16.8h, #7 \n" // 16 bit to 8 bit Y
+ "uqadd v0.8b, v0.8b, v7.8b \n"
+ MEMACCESS(1)
+ "st1 {v0.8b}, [%1], #8 \n" // store 8 pixels Y.
+ "b.gt 1b \n"
+ : "+r"(src_raw), // %0
+ "+r"(dst_y), // %1
+ "+r"(pix) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"
+ );
+}
+#endif // HAS_RAWTOYROW_NEON
+
+// Bilinear filter 16x2 -> 16x1
+#ifdef HAS_INTERPOLATEROW_NEON
+void InterpolateRow_NEON(uint8* dst_ptr,
+ const uint8* src_ptr, ptrdiff_t src_stride,
+ int dst_width, int source_y_fraction) {
+ int y1_fraction = source_y_fraction;
+ int y0_fraction = 256 - y1_fraction;
+ const uint8* src_ptr1 = src_ptr + src_stride;
+ asm volatile (
+ "cmp %w4, #0 \n"
+ "b.eq 100f \n"
+ "cmp %w4, #64 \n"
+ "b.eq 75f \n"
+ "cmp %w4, #128 \n"
+ "b.eq 50f \n"
+ "cmp %w4, #192 \n"
+ "b.eq 25f \n"
+
+ "dup v5.16b, %w4 \n"
+ "dup v4.16b, %w5 \n"
+ // General purpose row blend.
+ "1: \n"
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n"
+ MEMACCESS(2)
+ "ld1 {v1.16b}, [%2], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ "umull v2.8h, v0.8b, v4.8b \n"
+ "umull2 v3.8h, v0.16b, v4.16b \n"
+ "umlal v2.8h, v1.8b, v5.8b \n"
+ "umlal2 v3.8h, v1.16b, v5.16b \n"
+ "rshrn v0.8b, v2.8h, #8 \n"
+ "rshrn2 v0.16b, v3.8h, #8 \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 1b \n"
+ "b 99f \n"
+
+ // Blend 25 / 75.
+ "25: \n"
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n"
+ MEMACCESS(2)
+ "ld1 {v1.16b}, [%2], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 25b \n"
+ "b 99f \n"
+
+ // Blend 50 / 50.
+ "50: \n"
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n"
+ MEMACCESS(2)
+ "ld1 {v1.16b}, [%2], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 50b \n"
+ "b 99f \n"
+
+ // Blend 75 / 25.
+ "75: \n"
+ MEMACCESS(1)
+ "ld1 {v1.16b}, [%1], #16 \n"
+ MEMACCESS(2)
+ "ld1 {v0.16b}, [%2], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 75b \n"
+ "b 99f \n"
+
+ // Blend 100 / 0 - Copy row unchanged.
+ "100: \n"
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 100b \n"
+
+ "99: \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(src_ptr1), // %2
+ "+r"(dst_width), // %3
+ "+r"(y1_fraction), // %4
+ "+r"(y0_fraction) // %5
+ :
+ : "cc", "memory", "v0", "v1", "v3", "v4", "v5"
+ );
+}
+#endif // HAS_INTERPOLATEROW_NEON
+
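+// The general-purpose blend above as scalar C (sketch only): a rounded
+// weighted average of two rows, with source_y_fraction in [0, 256] selecting
+// how much of the second row contributes; the special cases short-cut
+// 0, 1/4, 1/2 and 3/4 with plain copies and urhadd halving adds.
+static void InterpolateRow_Sketch(uint8* dst, const uint8* src0,
+                                  const uint8* src1, int width, int f) {
+  for (int x = 0; x < width; ++x) {
+    dst[x] = (uint8)((src0[x] * (256 - f) + src1[x] * f + 128) >> 8);
+  }
+}
+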
+// dr * (256 - sa) / 256 + sr = dr - dr * sa / 256 + sr
+#ifdef HAS_ARGBBLENDROW_NEON
+void ARGBBlendRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "subs %w3, %w3, #8 \n"
+ "b.lt 89f \n"
+ // Blend 8 pixels.
+ "8: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB0 pixels
+ MEMACCESS(1)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%1], #32 \n" // load 8 ARGB1 pixels
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "umull v16.8h, v4.8b, v3.8b \n" // db * a
+ "umull v17.8h, v5.8b, v3.8b \n" // dg * a
+ "umull v18.8h, v6.8b, v3.8b \n" // dr * a
+ "uqrshrn v16.8b, v16.8h, #8 \n" // db >>= 8
+ "uqrshrn v17.8b, v17.8h, #8 \n" // dg >>= 8
+ "uqrshrn v18.8b, v18.8h, #8 \n" // dr >>= 8
+ "uqsub v4.8b, v4.8b, v16.8b \n" // db - (db * a / 256)
+ "uqsub v5.8b, v5.8b, v17.8b \n" // dg - (dg * a / 256)
+ "uqsub v6.8b, v6.8b, v18.8b \n" // dr - (dr * a / 256)
+ "uqadd v0.8b, v0.8b, v4.8b \n" // + sb
+ "uqadd v1.8b, v1.8b, v5.8b \n" // + sg
+ "uqadd v2.8b, v2.8b, v6.8b \n" // + sr
+ "movi v3.8b, #255 \n" // a = 255
+ MEMACCESS(2)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%2], #32 \n" // store 8 ARGB pixels
+ "b.ge 8b \n"
+
+ "89: \n"
+ "adds %w3, %w3, #8-1 \n"
+ "b.lt 99f \n"
+
+ // Blend 1 pixels.
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.b,v1.b,v2.b,v3.b}[0], [%0], #4 \n" // load 1 pixel ARGB0.
+ MEMACCESS(1)
+ "ld4 {v4.b,v5.b,v6.b,v7.b}[0], [%1], #4 \n" // load 1 pixel ARGB1.
+ "subs %w3, %w3, #1 \n" // 1 processed per loop.
+ "umull v16.8h, v4.8b, v3.8b \n" // db * a
+ "umull v17.8h, v5.8b, v3.8b \n" // dg * a
+ "umull v18.8h, v6.8b, v3.8b \n" // dr * a
+ "uqrshrn v16.8b, v16.8h, #8 \n" // db >>= 8
+ "uqrshrn v17.8b, v17.8h, #8 \n" // dg >>= 8
+ "uqrshrn v18.8b, v18.8h, #8 \n" // dr >>= 8
+ "uqsub v4.8b, v4.8b, v16.8b \n" // db - (db * a / 256)
+ "uqsub v5.8b, v5.8b, v17.8b \n" // dg - (dg * a / 256)
+ "uqsub v6.8b, v6.8b, v18.8b \n" // dr - (dr * a / 256)
+ "uqadd v0.8b, v0.8b, v4.8b \n" // + sb
+ "uqadd v1.8b, v1.8b, v5.8b \n" // + sg
+ "uqadd v2.8b, v2.8b, v6.8b \n" // + sr
+ "movi v3.8b, #255 \n" // a = 255
+ MEMACCESS(2)
+ "st4 {v0.b,v1.b,v2.b,v3.b}[0], [%2], #4 \n" // store 1 pixel.
+ "b.ge 1b \n"
+
+ "99: \n"
+
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v16", "v17", "v18"
+ );
+}
+#endif // HAS_ARGBBLENDROW_NEON
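+
+// The per-channel math of the blend above as a scalar C sketch (the helper
+// name is illustrative): out = src + dst - dst * src_alpha / 256, with the
+// same rounding (uqrshrn #8) and unsigned saturating add (uqadd) as the
+// NEON code.
+static uint8 BlendChannel_Sketch(uint8 s, uint8 d, uint8 a) {
+  int da = (d * a + 128) >> 8;  // rounded dst * alpha / 256; never exceeds d.
+  int out = s + (d - da);       // uqadd: clamp the sum at 255.
+  return (uint8)(out > 255 ? 255 : out);
+}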
+
+// Attenuate 8 pixels at a time.
+#ifdef HAS_ARGBATTENUATEROW_NEON
+void ARGBAttenuateRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) {
+ asm volatile (
+ // Attenuate 8 pixels.
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB pixels
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v4.8h, v0.8b, v3.8b \n" // b * a
+ "umull v5.8h, v1.8b, v3.8b \n" // g * a
+ "umull v6.8h, v2.8b, v3.8b \n" // r * a
+ "uqrshrn v0.8b, v4.8h, #8 \n" // b >>= 8
+ "uqrshrn v1.8b, v5.8h, #8 \n" // g >>= 8
+ "uqrshrn v2.8b, v6.8h, #8 \n" // r >>= 8
+ MEMACCESS(1)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%1], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6"
+ );
+}
+#endif // HAS_ARGBATTENUATEROW_NEON
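+
+// Scalar sketch of the attenuation above (illustrative helper, not part of
+// the build): each color channel is scaled by the pixel's own alpha with
+// rounding, matching umull + uqrshrn #8; the alpha byte passes through.
+static uint8 AttenuateChannel_Sketch(uint8 color, uint8 alpha) {
+  return (uint8)((color * alpha + 128) >> 8);
+}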
+
+// Quantize 8 ARGB pixels (32 bytes).
+// dst = (dst * scale >> 16) * interval_size + interval_offset;
+#ifdef HAS_ARGBQUANTIZEROW_NEON
+void ARGBQuantizeRow_NEON(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width) {
+ asm volatile (
+ "dup v4.8h, %w2 \n"
+ "ushr v4.8h, v4.8h, #1 \n" // scale >>= 1
+ "dup v5.8h, %w3 \n" // interval multiply.
+ "dup v6.8h, %w4 \n" // interval add
+
+ // 8 pixel loop.
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0] \n" // load 8 pixels of ARGB.
+ "subs %w1, %w1, #8 \n" // 8 processed per loop.
+ "uxtl v0.8h, v0.8b \n" // b (0 .. 255)
+ "uxtl v1.8h, v1.8b \n"
+ "uxtl v2.8h, v2.8b \n"
+ "sqdmulh v0.8h, v0.8h, v4.8h \n" // b * scale
+ "sqdmulh v1.8h, v1.8h, v4.8h \n" // g
+ "sqdmulh v2.8h, v2.8h, v4.8h \n" // r
+ "mul v0.8h, v0.8h, v5.8h \n" // b * interval_size
+ "mul v1.8h, v1.8h, v5.8h \n" // g
+ "mul v2.8h, v2.8h, v5.8h \n" // r
+ "add v0.8h, v0.8h, v6.8h \n" // b + interval_offset
+ "add v1.8h, v1.8h, v6.8h \n" // g
+ "add v2.8h, v2.8h, v6.8h \n" // r
+ "uqxtn v0.8b, v0.8h \n"
+ "uqxtn v1.8b, v1.8h \n"
+ "uqxtn v2.8b, v2.8h \n"
+ MEMACCESS(0)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(width) // %1
+ : "r"(scale), // %2
+ "r"(interval_size), // %3
+ "r"(interval_offset) // %4
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6"
+ );
+}
+#endif // HAS_ARGBQUANTIZEROW_NEON
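+
+// Scalar sketch of the quantize formula above (the name is illustrative).
+// sqdmulh with the scale pre-shifted right by 1 is effectively
+// (v * scale) >> 16, which the plain C below spells out; alpha is kept.
+static void ARGBQuantizeRow_C_Sketch(uint8* dst_argb, int scale,
+                                     int interval_size, int interval_offset,
+                                     int width) {
+  int x, c;
+  for (x = 0; x < width; ++x) {
+    for (c = 0; c < 3; ++c) {  // B, G and R only.
+      int v = dst_argb[x * 4 + c];
+      v = (v * scale >> 16) * interval_size + interval_offset;
+      dst_argb[x * 4 + c] = (uint8)(v > 255 ? 255 : v);  // uqxtn saturates.
+    }
+  }
+}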
+
+// Shade 8 pixels at a time by specified value.
+// NOTE vqrdmulh.s16 q10, q10, d0[0] must use a scalar register from 0 to 8.
+// Rounding in vqrdmulh does +1 to high if high bit of low s16 is set.
+#ifdef HAS_ARGBSHADEROW_NEON
+void ARGBShadeRow_NEON(const uint8* src_argb, uint8* dst_argb, int width,
+ uint32 value) {
+ asm volatile (
+ "dup v0.4s, %w3 \n" // duplicate scale value.
+ "zip1 v0.8b, v0.8b, v0.8b \n" // v0.8b aarrggbb.
+ "ushr v0.8h, v0.8h, #1 \n" // scale / 2.
+
+ // 8 pixel loop.
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%0], #32 \n" // load 8 ARGB pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "uxtl v4.8h, v4.8b \n" // b (0 .. 255)
+ "uxtl v5.8h, v5.8b \n"
+ "uxtl v6.8h, v6.8b \n"
+ "uxtl v7.8h, v7.8b \n"
+ "sqrdmulh v4.8h, v4.8h, v0.h[0] \n" // b * scale * 2
+ "sqrdmulh v5.8h, v5.8h, v0.h[1] \n" // g
+ "sqrdmulh v6.8h, v6.8h, v0.h[2] \n" // r
+ "sqrdmulh v7.8h, v7.8h, v0.h[3] \n" // a
+ "uqxtn v4.8b, v4.8h \n"
+ "uqxtn v5.8b, v5.8h \n"
+ "uqxtn v6.8b, v6.8h \n"
+ "uqxtn v7.8b, v7.8h \n"
+ MEMACCESS(1)
+ "st4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%1], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "r"(value) // %3
+ : "cc", "memory", "v0", "v4", "v5", "v6", "v7"
+ );
+}
+#endif // HAS_ARGBSHADEROW_NEON
+
+// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels
+// Similar to ARGBToYJ but stores ARGB.
+// C code is (15 * b + 75 * g + 38 * r + 64) >> 7;
+#ifdef HAS_ARGBGRAYROW_NEON
+void ARGBGrayRow_NEON(const uint8* src_argb, uint8* dst_argb, int width) {
+ asm volatile (
+ "movi v24.8b, #15 \n" // B * 0.11400 coefficient
+ "movi v25.8b, #75 \n" // G * 0.58700 coefficient
+ "movi v26.8b, #38 \n" // R * 0.29900 coefficient
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "umull v4.8h, v0.8b, v24.8b \n" // B
+ "umlal v4.8h, v1.8b, v25.8b \n" // G
+ "umlal v4.8h, v2.8b, v26.8b \n" // R
+ "sqrshrun v0.8b, v4.8h, #7 \n" // 15 bit to 8 bit B
+ "orr v1.8b, v0.8b, v0.8b \n" // G
+ "orr v2.8b, v0.8b, v0.8b \n" // R
+ MEMACCESS(1)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%1], #32 \n" // store 8 pixels.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v24", "v25", "v26"
+ );
+}
+#endif // HAS_ARGBGRAYROW_NEON
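+
+// Scalar sketch of the gray conversion above (illustrative name): one
+// rounded BT.601-style luma value written back to B, G and R; alpha is
+// preserved unchanged.
+static void ARGBGrayPixel_Sketch(uint8* argb) {
+  int y = (15 * argb[0] + 75 * argb[1] + 38 * argb[2] + 64) >> 7;
+  argb[0] = argb[1] = argb[2] = (uint8)y;  // B = G = R = gray; max is 255.
+}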
+
+// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
+// b = (r * 35 + g * 68 + b * 17) >> 7
+// g = (r * 45 + g * 88 + b * 22) >> 7
+// r = (r * 50 + g * 98 + b * 24) >> 7
+
+#ifdef HAS_ARGBSEPIAROW_NEON
+void ARGBSepiaRow_NEON(uint8* dst_argb, int width) {
+ asm volatile (
+ "movi v20.8b, #17 \n" // BB coefficient
+ "movi v21.8b, #68 \n" // BG coefficient
+ "movi v22.8b, #35 \n" // BR coefficient
+ "movi v24.8b, #22 \n" // GB coefficient
+ "movi v25.8b, #88 \n" // GG coefficient
+ "movi v26.8b, #45 \n" // GR coefficient
+    "movi        v28.8b, #24                   \n"  // RB coefficient
+    "movi        v29.8b, #98                   \n"  // RG coefficient
+    "movi        v30.8b, #50                   \n"  // RR coefficient
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0] \n" // load 8 ARGB pixels.
+ "subs %w1, %w1, #8 \n" // 8 processed per loop.
+ "umull v4.8h, v0.8b, v20.8b \n" // B to Sepia B
+ "umlal v4.8h, v1.8b, v21.8b \n" // G
+ "umlal v4.8h, v2.8b, v22.8b \n" // R
+ "umull v5.8h, v0.8b, v24.8b \n" // B to Sepia G
+ "umlal v5.8h, v1.8b, v25.8b \n" // G
+ "umlal v5.8h, v2.8b, v26.8b \n" // R
+ "umull v6.8h, v0.8b, v28.8b \n" // B to Sepia R
+ "umlal v6.8h, v1.8b, v29.8b \n" // G
+ "umlal v6.8h, v2.8b, v30.8b \n" // R
+ "uqshrn v0.8b, v4.8h, #7 \n" // 16 bit to 8 bit B
+ "uqshrn v1.8b, v5.8h, #7 \n" // 16 bit to 8 bit G
+ "uqshrn v2.8b, v6.8h, #7 \n" // 16 bit to 8 bit R
+ MEMACCESS(0)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // store 8 pixels.
+ "b.gt 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(width) // %1
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
+ "v20", "v21", "v22", "v24", "v25", "v26", "v28", "v29", "v30"
+ );
+}
+#endif // HAS_ARGBSEPIAROW_NEON
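+
+// Scalar sketch of the sepia transform above (illustrative name). Each
+// output channel is a dot product of the input B,G,R with one coefficient
+// row, shifted right by 7 and saturated to 255 (uqshrn); alpha is kept.
+static void ARGBSepiaPixel_Sketch(uint8* argb) {
+  int b = argb[0], g = argb[1], r = argb[2];
+  int sb = (17 * b + 68 * g + 35 * r) >> 7;
+  int sg = (22 * b + 88 * g + 45 * r) >> 7;
+  int sr = (24 * b + 98 * g + 50 * r) >> 7;
+  argb[0] = (uint8)(sb > 255 ? 255 : sb);
+  argb[1] = (uint8)(sg > 255 ? 255 : sg);
+  argb[2] = (uint8)(sr > 255 ? 255 : sr);
+}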
+
+// Transform 8 ARGB pixels (32 bytes) with a color matrix.
+// TODO(fbarchard): Was the same as Sepia except the matrix is provided. This
+// function needs to saturate. Consider doing a non-saturating version.
+#ifdef HAS_ARGBCOLORMATRIXROW_NEON
+void ARGBColorMatrixRow_NEON(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width) {
+ asm volatile (
+ MEMACCESS(3)
+    "ld1         {v2.16b}, [%3]                \n"  // load the 16-byte color matrix.
+ "sxtl v0.8h, v2.8b \n" // B,G coefficients s16.
+ "sxtl2 v1.8h, v2.16b \n" // R,A coefficients s16.
+
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v16.8b,v17.8b,v18.8b,v19.8b}, [%0], #32 \n" // load 8 pixels.
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "uxtl v16.8h, v16.8b \n" // b (0 .. 255) 16 bit
+ "uxtl v17.8h, v17.8b \n" // g
+ "uxtl v18.8h, v18.8b \n" // r
+ "uxtl v19.8h, v19.8b \n" // a
+ "mul v22.8h, v16.8h, v0.h[0] \n" // B = B * Matrix B
+ "mul v23.8h, v16.8h, v0.h[4] \n" // G = B * Matrix G
+ "mul v24.8h, v16.8h, v1.h[0] \n" // R = B * Matrix R
+ "mul v25.8h, v16.8h, v1.h[4] \n" // A = B * Matrix A
+ "mul v4.8h, v17.8h, v0.h[1] \n" // B += G * Matrix B
+ "mul v5.8h, v17.8h, v0.h[5] \n" // G += G * Matrix G
+ "mul v6.8h, v17.8h, v1.h[1] \n" // R += G * Matrix R
+ "mul v7.8h, v17.8h, v1.h[5] \n" // A += G * Matrix A
+ "sqadd v22.8h, v22.8h, v4.8h \n" // Accumulate B
+ "sqadd v23.8h, v23.8h, v5.8h \n" // Accumulate G
+ "sqadd v24.8h, v24.8h, v6.8h \n" // Accumulate R
+ "sqadd v25.8h, v25.8h, v7.8h \n" // Accumulate A
+ "mul v4.8h, v18.8h, v0.h[2] \n" // B += R * Matrix B
+ "mul v5.8h, v18.8h, v0.h[6] \n" // G += R * Matrix G
+ "mul v6.8h, v18.8h, v1.h[2] \n" // R += R * Matrix R
+ "mul v7.8h, v18.8h, v1.h[6] \n" // A += R * Matrix A
+ "sqadd v22.8h, v22.8h, v4.8h \n" // Accumulate B
+ "sqadd v23.8h, v23.8h, v5.8h \n" // Accumulate G
+ "sqadd v24.8h, v24.8h, v6.8h \n" // Accumulate R
+ "sqadd v25.8h, v25.8h, v7.8h \n" // Accumulate A
+ "mul v4.8h, v19.8h, v0.h[3] \n" // B += A * Matrix B
+ "mul v5.8h, v19.8h, v0.h[7] \n" // G += A * Matrix G
+ "mul v6.8h, v19.8h, v1.h[3] \n" // R += A * Matrix R
+ "mul v7.8h, v19.8h, v1.h[7] \n" // A += A * Matrix A
+ "sqadd v22.8h, v22.8h, v4.8h \n" // Accumulate B
+ "sqadd v23.8h, v23.8h, v5.8h \n" // Accumulate G
+ "sqadd v24.8h, v24.8h, v6.8h \n" // Accumulate R
+ "sqadd v25.8h, v25.8h, v7.8h \n" // Accumulate A
+ "sqshrun v16.8b, v22.8h, #6 \n" // 16 bit to 8 bit B
+ "sqshrun v17.8b, v23.8h, #6 \n" // 16 bit to 8 bit G
+ "sqshrun v18.8b, v24.8h, #6 \n" // 16 bit to 8 bit R
+ "sqshrun v19.8b, v25.8h, #6 \n" // 16 bit to 8 bit A
+ MEMACCESS(1)
+ "st4 {v16.8b,v17.8b,v18.8b,v19.8b}, [%1], #32 \n" // store 8 pixels.
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(width) // %2
+ : "r"(matrix_argb) // %3
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17",
+ "v18", "v19", "v22", "v23", "v24", "v25"
+ );
+}
+#endif // HAS_ARGBCOLORMATRIXROW_NEON
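+
+// Scalar sketch of the color-matrix transform above (the name is
+// illustrative). matrix_argb holds four rows of four signed coefficients,
+// one row per output channel; each output is a dot product of the input
+// B,G,R,A, scaled down by 6 bits and saturated (sqshrun #6).
+static void ARGBColorMatrixPixel_Sketch(const uint8* src, uint8* dst,
+                                        const int8* matrix_argb) {
+  int j;
+  for (j = 0; j < 4; ++j) {
+    const int8* m = matrix_argb + j * 4;
+    int sum = src[0] * m[0] + src[1] * m[1] + src[2] * m[2] + src[3] * m[3];
+    sum >>= 6;
+    dst[j] = (uint8)(sum < 0 ? 0 : (sum > 255 ? 255 : sum));
+  }
+}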
+
+// TODO(fbarchard): fix vqshrun in ARGBMultiplyRow_NEON and reenable.
+// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
+#ifdef HAS_ARGBMULTIPLYROW_NEON
+void ARGBMultiplyRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 8 pixel loop.
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB pixels.
+ MEMACCESS(1)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%1], #32 \n" // load 8 more pixels.
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "umull v0.8h, v0.8b, v4.8b \n" // multiply B
+ "umull v1.8h, v1.8b, v5.8b \n" // multiply G
+ "umull v2.8h, v2.8b, v6.8b \n" // multiply R
+ "umull v3.8h, v3.8b, v7.8b \n" // multiply A
+ "rshrn v0.8b, v0.8h, #8 \n" // 16 bit to 8 bit B
+ "rshrn v1.8b, v1.8h, #8 \n" // 16 bit to 8 bit G
+ "rshrn v2.8b, v2.8h, #8 \n" // 16 bit to 8 bit R
+ "rshrn v3.8b, v3.8h, #8 \n" // 16 bit to 8 bit A
+ MEMACCESS(2)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%2], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
+ );
+}
+#endif // HAS_ARGBMULTIPLYROW_NEON
+
+// Add 2 rows of ARGB pixels together, 8 pixels at a time.
+#ifdef HAS_ARGBADDROW_NEON
+void ARGBAddRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 8 pixel loop.
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB pixels.
+ MEMACCESS(1)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%1], #32 \n" // load 8 more pixels.
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "uqadd v0.8b, v0.8b, v4.8b \n"
+ "uqadd v1.8b, v1.8b, v5.8b \n"
+ "uqadd v2.8b, v2.8b, v6.8b \n"
+ "uqadd v3.8b, v3.8b, v7.8b \n"
+ MEMACCESS(2)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%2], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
+ );
+}
+#endif // HAS_ARGBADDROW_NEON
+
+// Subtract 2 rows of ARGB pixels, 8 pixels at a time.
+#ifdef HAS_ARGBSUBTRACTROW_NEON
+void ARGBSubtractRow_NEON(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ // 8 pixel loop.
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // load 8 ARGB pixels.
+ MEMACCESS(1)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%1], #32 \n" // load 8 more pixels.
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "uqsub v0.8b, v0.8b, v4.8b \n"
+ "uqsub v1.8b, v1.8b, v5.8b \n"
+ "uqsub v2.8b, v2.8b, v6.8b \n"
+ "uqsub v3.8b, v3.8b, v7.8b \n"
+ MEMACCESS(2)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%2], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+
+ : "+r"(src_argb0), // %0
+ "+r"(src_argb1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"
+ );
+}
+#endif // HAS_ARGBSUBTRACTROW_NEON
+
+// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
+// A = 255
+// R = Sobel
+// G = Sobel
+// B = Sobel
+#ifdef HAS_SOBELROW_NEON
+void SobelRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "movi v3.8b, #255 \n" // alpha
+ // 8 pixel loop.
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.8b}, [%0], #8 \n" // load 8 sobelx.
+ MEMACCESS(1)
+ "ld1 {v1.8b}, [%1], #8 \n" // load 8 sobely.
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "uqadd v0.8b, v0.8b, v1.8b \n" // add
+ "orr v1.8b, v0.8b, v0.8b \n"
+ "orr v2.8b, v0.8b, v0.8b \n"
+ MEMACCESS(2)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%2], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3"
+ );
+}
+#endif // HAS_SOBELROW_NEON
+
+// Adds Sobel X and Sobel Y and stores Sobel into plane.
+#ifdef HAS_SOBELTOPLANEROW_NEON
+void SobelToPlaneRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width) {
+ asm volatile (
+ // 16 pixel loop.
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load 16 sobelx.
+ MEMACCESS(1)
+ "ld1 {v1.16b}, [%1], #16 \n" // load 16 sobely.
+ "subs %w3, %w3, #16 \n" // 16 processed per loop.
+ "uqadd v0.16b, v0.16b, v1.16b \n" // add
+ MEMACCESS(2)
+ "st1 {v0.16b}, [%2], #16 \n" // store 16 pixels.
+ "b.gt 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_y), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "v0", "v1"
+ );
+}
+#endif // HAS_SOBELTOPLANEROW_NEON
+
+// Mixes Sobel X, Sobel Y and Sobel into ARGB.
+// A = 255
+// R = Sobel X
+// G = Sobel
+// B = Sobel Y
+#ifdef HAS_SOBELXYROW_NEON
+void SobelXYRow_NEON(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ asm volatile (
+ "movi v3.8b, #255 \n" // alpha
+ // 8 pixel loop.
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v2.8b}, [%0], #8 \n" // load 8 sobelx.
+ MEMACCESS(1)
+ "ld1 {v0.8b}, [%1], #8 \n" // load 8 sobely.
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "uqadd v1.8b, v0.8b, v2.8b \n" // add
+ MEMACCESS(2)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%2], #32 \n" // store 8 ARGB pixels
+ "b.gt 1b \n"
+ : "+r"(src_sobelx), // %0
+ "+r"(src_sobely), // %1
+ "+r"(dst_argb), // %2
+ "+r"(width) // %3
+ :
+ : "cc", "memory", "v0", "v1", "v2", "v3"
+ );
+}
+#endif // HAS_SOBELXYROW_NEON
+
+// SobelX as a matrix is
+// -1 0 1
+// -2 0 2
+// -1 0 1
+#ifdef HAS_SOBELXROW_NEON
+void SobelXRow_NEON(const uint8* src_y0, const uint8* src_y1,
+ const uint8* src_y2, uint8* dst_sobelx, int width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.8b}, [%0],%5 \n" // top
+ MEMACCESS(0)
+ "ld1 {v1.8b}, [%0],%6 \n"
+ "usubl v0.8h, v0.8b, v1.8b \n"
+ MEMACCESS(1)
+ "ld1 {v2.8b}, [%1],%5 \n" // center * 2
+ MEMACCESS(1)
+ "ld1 {v3.8b}, [%1],%6 \n"
+ "usubl v1.8h, v2.8b, v3.8b \n"
+ "add v0.8h, v0.8h, v1.8h \n"
+ "add v0.8h, v0.8h, v1.8h \n"
+ MEMACCESS(2)
+ "ld1 {v2.8b}, [%2],%5 \n" // bottom
+ MEMACCESS(2)
+ "ld1 {v3.8b}, [%2],%6 \n"
+ "subs %w4, %w4, #8 \n" // 8 pixels
+ "usubl v1.8h, v2.8b, v3.8b \n"
+ "add v0.8h, v0.8h, v1.8h \n"
+ "abs v0.8h, v0.8h \n"
+ "uqxtn v0.8b, v0.8h \n"
+ MEMACCESS(3)
+ "st1 {v0.8b}, [%3], #8 \n" // store 8 sobelx
+ "b.gt 1b \n"
+ : "+r"(src_y0), // %0
+ "+r"(src_y1), // %1
+ "+r"(src_y2), // %2
+ "+r"(dst_sobelx), // %3
+ "+r"(width) // %4
+ : "r"(2LL), // %5
+ "r"(6LL) // %6
+ : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+#endif // HAS_SOBELXROW_NEON
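+
+// Scalar sketch of the SobelX filter above (illustrative name): left-minus-
+// right differences from three rows, the middle row weighted twice, then
+// the absolute value saturated to 255. Like the NEON code, it reads two
+// pixels beyond width on each row.
+static void SobelXRow_C_Sketch(const uint8* src_y0, const uint8* src_y1,
+                               const uint8* src_y2, uint8* dst_sobelx,
+                               int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    int a = src_y0[x] - src_y0[x + 2];  // top row,    weight 1
+    int b = src_y1[x] - src_y1[x + 2];  // middle row, weight 2
+    int c = src_y2[x] - src_y2[x + 2];  // bottom row, weight 1
+    int sobel = a + b + b + c;
+    if (sobel < 0) sobel = -sobel;      // abs
+    dst_sobelx[x] = (uint8)(sobel > 255 ? 255 : sobel);
+  }
+}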
+
+// SobelY as a matrix is
+// -1 -2 -1
+// 0 0 0
+// 1 2 1
+#ifdef HAS_SOBELYROW_NEON
+void SobelYRow_NEON(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.8b}, [%0],%4 \n" // left
+ MEMACCESS(1)
+ "ld1 {v1.8b}, [%1],%4 \n"
+ "usubl v0.8h, v0.8b, v1.8b \n"
+ MEMACCESS(0)
+ "ld1 {v2.8b}, [%0],%4 \n" // center * 2
+ MEMACCESS(1)
+ "ld1 {v3.8b}, [%1],%4 \n"
+ "usubl v1.8h, v2.8b, v3.8b \n"
+ "add v0.8h, v0.8h, v1.8h \n"
+ "add v0.8h, v0.8h, v1.8h \n"
+ MEMACCESS(0)
+ "ld1 {v2.8b}, [%0],%5 \n" // right
+ MEMACCESS(1)
+ "ld1 {v3.8b}, [%1],%5 \n"
+ "subs %w3, %w3, #8 \n" // 8 pixels
+ "usubl v1.8h, v2.8b, v3.8b \n"
+ "add v0.8h, v0.8h, v1.8h \n"
+ "abs v0.8h, v0.8h \n"
+ "uqxtn v0.8b, v0.8h \n"
+ MEMACCESS(2)
+ "st1 {v0.8b}, [%2], #8 \n" // store 8 sobely
+ "b.gt 1b \n"
+ : "+r"(src_y0), // %0
+ "+r"(src_y1), // %1
+ "+r"(dst_sobely), // %2
+ "+r"(width) // %3
+ : "r"(1LL), // %4
+ "r"(6LL) // %5
+ : "cc", "memory", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+#endif // HAS_SOBELYROW_NEON
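+
+// The matching scalar sketch for SobelY (illustrative name): top-minus-
+// bottom differences at three neighboring columns, the center column
+// weighted twice, then the absolute value saturated to 255.
+static void SobelYRow_C_Sketch(const uint8* src_y0, const uint8* src_y1,
+                               uint8* dst_sobely, int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    int a = src_y0[x] - src_y1[x];          // left column,   weight 1
+    int b = src_y0[x + 1] - src_y1[x + 1];  // center column, weight 2
+    int c = src_y0[x + 2] - src_y1[x + 2];  // right column,  weight 1
+    int sobel = a + b + b + c;
+    if (sobel < 0) sobel = -sobel;
+    dst_sobely[x] = (uint8)(sobel > 255 ? 255 : sobel);
+  }
+}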
+#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/row_win.cc b/media/libaom/src/third_party/libyuv/source/row_win.cc
new file mode 100644
index 000000000..71be268b4
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/row_win.cc
@@ -0,0 +1,6331 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#if !defined(LIBYUV_DISABLE_X86) && defined(_M_X64) && \
+ defined(_MSC_VER) && !defined(__clang__)
+#include <emmintrin.h>
+#include <tmmintrin.h> // For _mm_maddubs_epi16
+#endif
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for Visual C.
+#if !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64)) && \
+ defined(_MSC_VER) && !defined(__clang__)
+
+struct YuvConstants {
+ lvec8 kUVToB; // 0
+ lvec8 kUVToG; // 32
+ lvec8 kUVToR; // 64
+ lvec16 kUVBiasB; // 96
+ lvec16 kUVBiasG; // 128
+ lvec16 kUVBiasR; // 160
+ lvec16 kYToRgb; // 192
+};
+
+// BT.601 YUV to RGB reference
+// R = (Y - 16) * 1.164 - V * -1.596
+// G = (Y - 16) * 1.164 - U * 0.391 - V * 0.813
+// B = (Y - 16) * 1.164 - U * -2.018
+
+// Y contribution to R,G,B. Scale and bias.
+// TODO(fbarchard): Consider moving constants into a common header.
+#define YG 18997 /* round(1.164 * 64 * 256 * 256 / 257) */
+#define YGB -1160 /* 1.164 * 64 * -16 + 64 / 2 */
+
+// U and V contributions to R,G,B.
+#define UB -128 /* max(-128, round(-2.018 * 64)) */
+#define UG 25 /* round(0.391 * 64) */
+#define VG 52 /* round(0.813 * 64) */
+#define VR -102 /* round(-1.596 * 64) */
+
+// Bias values to subtract 16 from Y and 128 from U and V.
+#define BB (UB * 128 + YGB)
+#define BG (UG * 128 + VG * 128 + YGB)
+#define BR (VR * 128 + YGB)
+
+// BT601 constants for YUV to RGB.
+static YuvConstants SIMD_ALIGNED(kYuvConstants) = {
+ { UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0,
+ UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0 },
+ { UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG,
+ UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG },
+ { 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR,
+ 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR },
+ { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB },
+ { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG },
+ { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR },
+ { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG }
+};
+
+// BT601 constants for NV21 where chroma plane is VU instead of UV.
+static YuvConstants SIMD_ALIGNED(kYvuConstants) = {
+ { 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB,
+ 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB, 0, UB },
+ { VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG,
+ VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG, VG, UG },
+ { VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0,
+ VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0, VR, 0 },
+ { BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB, BB },
+ { BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG, BG },
+ { BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR, BR },
+ { YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG, YG }
+};
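+
+// For reference, a scalar sketch of what the tables above encode (names are
+// illustrative; this mirrors the SIMD flow rather than any exact API). The
+// Y byte is widened to 16 bits (y * 0x0101) and scaled by YG; the biases
+// BB/BG/BR fold in both the -16 Y offset and the -128 chroma offset.
+static uint8 Clamp255_Sketch(int v) {
+  return (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
+}
+static void YuvPixel_Sketch(uint8 y, uint8 u, uint8 v,
+                            uint8* b, uint8* g, uint8* r) {
+  int y1 = (int)(((uint32)y * 0x0101 * YG) >> 16);
+  *b = Clamp255_Sketch((y1 + BB - (u * UB)) >> 6);
+  *g = Clamp255_Sketch((y1 + BG - (u * UG + v * VG)) >> 6);
+  *r = Clamp255_Sketch((y1 + BR - (v * VR)) >> 6);
+}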
+
+#undef YG
+#undef YGB
+#undef UB
+#undef UG
+#undef VG
+#undef VR
+#undef BB
+#undef BG
+#undef BR
+
+// JPEG YUV to RGB reference
+// * R = Y - V * -1.40200
+// * G = Y - U * 0.34414 - V * 0.71414
+// * B = Y - U * -1.77200
+
+// Y contribution to R,G,B. Scale and bias.
+// TODO(fbarchard): Consider moving constants into a common header.
+#define YGJ 16320 /* round(1.000 * 64 * 256 * 256 / 257) */
+#define YGBJ 32 /* 64 / 2 */
+
+// U and V contributions to R,G,B.
+#define UBJ -113 /* round(-1.77200 * 64) */
+#define UGJ 22 /* round(0.34414 * 64) */
+#define VGJ 46 /* round(0.71414 * 64) */
+#define VRJ -90 /* round(-1.40200 * 64) */
+
+// Bias values to subtract 16 from Y and 128 from U and V.
+#define BBJ (UBJ * 128 + YGBJ)
+#define BGJ (UGJ * 128 + VGJ * 128 + YGBJ)
+#define BRJ (VRJ * 128 + YGBJ)
+
+// JPEG constants for YUV to RGB.
+static YuvConstants SIMD_ALIGNED(kYuvJConstants) = {
+ { UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0,
+ UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0, UBJ, 0 },
+ { UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
+ UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
+ UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ,
+ UGJ, VGJ, UGJ, VGJ, UGJ, VGJ, UGJ, VGJ },
+ { 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ,
+ 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ, 0, VRJ },
+ { BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ,
+ BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ, BBJ },
+ { BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ,
+ BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ, BGJ },
+ { BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ,
+ BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ, BRJ },
+ { YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ,
+ YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ, YGJ }
+};
+
+#undef YGJ
+#undef YGBJ
+#undef UBJ
+#undef UGJ
+#undef VGJ
+#undef VRJ
+#undef BBJ
+#undef BGJ
+#undef BRJ
+
+// 64 bit
+#if defined(_M_X64)
+#if defined(HAS_I422TOARGBROW_SSSE3)
+void I422ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __m128i xmm0, xmm1, xmm2, xmm3;
+ const __m128i xmm5 = _mm_set1_epi8(-1);
+ const ptrdiff_t offset = (uint8*)v_buf - (uint8*)u_buf;
+
+ while (width > 0) {
+ xmm0 = _mm_cvtsi32_si128(*(uint32*)u_buf);
+ xmm1 = _mm_cvtsi32_si128(*(uint32*)(u_buf + offset));
+ xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);
+ xmm0 = _mm_unpacklo_epi16(xmm0, xmm0);
+ xmm1 = _mm_loadu_si128(&xmm0);
+ xmm2 = _mm_loadu_si128(&xmm0);
+ xmm0 = _mm_maddubs_epi16(xmm0, *(__m128i*)kYuvConstants.kUVToB);
+ xmm1 = _mm_maddubs_epi16(xmm1, *(__m128i*)kYuvConstants.kUVToG);
+ xmm2 = _mm_maddubs_epi16(xmm2, *(__m128i*)kYuvConstants.kUVToR);
+ xmm0 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasB, xmm0);
+ xmm1 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasG, xmm1);
+ xmm2 = _mm_sub_epi16(*(__m128i*)kYuvConstants.kUVBiasR, xmm2);
+ xmm3 = _mm_loadl_epi64((__m128i*)y_buf);
+ xmm3 = _mm_unpacklo_epi8(xmm3, xmm3);
+ xmm3 = _mm_mulhi_epu16(xmm3, *(__m128i*)kYuvConstants.kYToRgb);
+ xmm0 = _mm_adds_epi16(xmm0, xmm3);
+ xmm1 = _mm_adds_epi16(xmm1, xmm3);
+ xmm2 = _mm_adds_epi16(xmm2, xmm3);
+ xmm0 = _mm_srai_epi16(xmm0, 6);
+ xmm1 = _mm_srai_epi16(xmm1, 6);
+ xmm2 = _mm_srai_epi16(xmm2, 6);
+ xmm0 = _mm_packus_epi16(xmm0, xmm0);
+ xmm1 = _mm_packus_epi16(xmm1, xmm1);
+ xmm2 = _mm_packus_epi16(xmm2, xmm2);
+ xmm0 = _mm_unpacklo_epi8(xmm0, xmm1);
+ xmm2 = _mm_unpacklo_epi8(xmm2, xmm5);
+ xmm1 = _mm_loadu_si128(&xmm0);
+ xmm0 = _mm_unpacklo_epi16(xmm0, xmm2);
+ xmm1 = _mm_unpackhi_epi16(xmm1, xmm2);
+
+ _mm_storeu_si128((__m128i *)dst_argb, xmm0);
+ _mm_storeu_si128((__m128i *)(dst_argb + 16), xmm1);
+
+ y_buf += 8;
+ u_buf += 4;
+ dst_argb += 32;
+ width -= 8;
+ }
+}
+#endif
+// 32 bit
+#else // defined(_M_X64)
+#ifdef HAS_ARGBTOYROW_SSSE3
+
+// Constants for ARGB.
+static const vec8 kARGBToY = {
+ 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0
+};
+
+// JPEG full range.
+static const vec8 kARGBToYJ = {
+ 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0, 15, 75, 38, 0
+};
+
+static const vec8 kARGBToU = {
+ 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0
+};
+
+static const vec8 kARGBToUJ = {
+ 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0, 127, -84, -43, 0
+};
+
+static const vec8 kARGBToV = {
+ -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0,
+};
+
+static const vec8 kARGBToVJ = {
+ -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0, -20, -107, 127, 0
+};
+
+// vpshufb for vphaddw + vpackuswb packed to shorts.
+static const lvec8 kShufARGBToUV_AVX = {
+ 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15,
+ 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15
+};
+
+// Constants for BGRA.
+static const vec8 kBGRAToY = {
+ 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13
+};
+
+static const vec8 kBGRAToU = {
+ 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112
+};
+
+static const vec8 kBGRAToV = {
+ 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18
+};
+
+// Constants for ABGR.
+static const vec8 kABGRToY = {
+ 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0, 33, 65, 13, 0
+};
+
+static const vec8 kABGRToU = {
+ -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0, -38, -74, 112, 0
+};
+
+static const vec8 kABGRToV = {
+ 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0, 112, -94, -18, 0
+};
+
+// Constants for RGBA.
+static const vec8 kRGBAToY = {
+ 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33, 0, 13, 65, 33
+};
+
+static const vec8 kRGBAToU = {
+ 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38, 0, 112, -74, -38
+};
+
+static const vec8 kRGBAToV = {
+ 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112, 0, -18, -94, 112
+};
+
+static const uvec8 kAddY16 = {
+ 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u, 16u
+};
+
+// 7 bit fixed point 0.5.
+static const vec16 kAddYJ64 = {
+ 64, 64, 64, 64, 64, 64, 64, 64
+};
+
+static const uvec8 kAddUV128 = {
+ 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u,
+ 128u, 128u, 128u, 128u, 128u, 128u, 128u, 128u
+};
+
+static const uvec16 kAddUVJ128 = {
+ 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u, 0x8080u
+};
+
+// Shuffle table for converting RGB24 to ARGB.
+static const uvec8 kShuffleMaskRGB24ToARGB = {
+ 0u, 1u, 2u, 12u, 3u, 4u, 5u, 13u, 6u, 7u, 8u, 14u, 9u, 10u, 11u, 15u
+};
+
+// Shuffle table for converting RAW to ARGB.
+static const uvec8 kShuffleMaskRAWToARGB = {
+ 2u, 1u, 0u, 12u, 5u, 4u, 3u, 13u, 8u, 7u, 6u, 14u, 11u, 10u, 9u, 15u
+};
+
+// Shuffle table for converting ARGB to RGB24.
+static const uvec8 kShuffleMaskARGBToRGB24 = {
+ 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 10u, 12u, 13u, 14u, 128u, 128u, 128u, 128u
+};
+
+// Shuffle table for converting ARGB to RAW.
+static const uvec8 kShuffleMaskARGBToRAW = {
+ 2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 8u, 14u, 13u, 12u, 128u, 128u, 128u, 128u
+};
+
+// Shuffle table for converting ARGBToRGB24 for I422ToRGB24. First 8 + next 4
+static const uvec8 kShuffleMaskARGBToRGB24_0 = {
+ 0u, 1u, 2u, 4u, 5u, 6u, 8u, 9u, 128u, 128u, 128u, 128u, 10u, 12u, 13u, 14u
+};
+
+// Shuffle table for converting ARGBToRAW for I422ToRAW. First 8 + next 4
+static const uvec8 kShuffleMaskARGBToRAW_0 = {
+ 2u, 1u, 0u, 6u, 5u, 4u, 10u, 9u, 128u, 128u, 128u, 128u, 8u, 14u, 13u, 12u
+};
+
+// Duplicates gray value 3 times and fills in alpha opaque.
+__declspec(naked)
+void J400ToARGBRow_SSE2(const uint8* src_y, uint8* dst_argb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_y
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0xff000000
+ pslld xmm5, 24
+
+ convertloop:
+ movq xmm0, qword ptr [eax]
+ lea eax, [eax + 8]
+ punpcklbw xmm0, xmm0
+ movdqa xmm1, xmm0
+ punpcklwd xmm0, xmm0
+ punpckhwd xmm1, xmm1
+ por xmm0, xmm5
+ por xmm1, xmm5
+ movdqu [edx], xmm0
+ movdqu [edx + 16], xmm1
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+ ret
+ }
+}
+
+#ifdef HAS_J400TOARGBROW_AVX2
+// Duplicates gray value 3 times and fills in alpha opaque.
+__declspec(naked)
+void J400ToARGBRow_AVX2(const uint8* src_y, uint8* dst_argb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_y
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0xff000000
+ vpslld ymm5, ymm5, 24
+
+ convertloop:
+ vmovdqu xmm0, [eax]
+ lea eax, [eax + 16]
+ vpermq ymm0, ymm0, 0xd8
+ vpunpcklbw ymm0, ymm0, ymm0
+ vpermq ymm0, ymm0, 0xd8
+ vpunpckhwd ymm1, ymm0, ymm0
+ vpunpcklwd ymm0, ymm0, ymm0
+ vpor ymm0, ymm0, ymm5
+ vpor ymm1, ymm1, ymm5
+ vmovdqu [edx], ymm0
+ vmovdqu [edx + 32], ymm1
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_J400TOARGBROW_AVX2
+
+__declspec(naked)
+void RGB24ToARGBRow_SSSE3(const uint8* src_rgb24, uint8* dst_argb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_rgb24
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0xff000000
+ pslld xmm5, 24
+ movdqa xmm4, kShuffleMaskRGB24ToARGB
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm3, [eax + 32]
+ lea eax, [eax + 48]
+ movdqa xmm2, xmm3
+ palignr xmm2, xmm1, 8 // xmm2 = { xmm3[0:3] xmm1[8:15]}
+ pshufb xmm2, xmm4
+ por xmm2, xmm5
+ palignr xmm1, xmm0, 12 // xmm1 = { xmm3[0:7] xmm0[12:15]}
+ pshufb xmm0, xmm4
+ movdqu [edx + 32], xmm2
+ por xmm0, xmm5
+ pshufb xmm1, xmm4
+ movdqu [edx], xmm0
+ por xmm1, xmm5
+ palignr xmm3, xmm3, 4 // xmm3 = { xmm3[4:15]}
+ pshufb xmm3, xmm4
+ movdqu [edx + 16], xmm1
+ por xmm3, xmm5
+ movdqu [edx + 48], xmm3
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void RAWToARGBRow_SSSE3(const uint8* src_raw, uint8* dst_argb,
+ int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_raw
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0xff000000
+ pslld xmm5, 24
+ movdqa xmm4, kShuffleMaskRAWToARGB
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm3, [eax + 32]
+ lea eax, [eax + 48]
+ movdqa xmm2, xmm3
+ palignr xmm2, xmm1, 8 // xmm2 = { xmm3[0:3] xmm1[8:15]}
+ pshufb xmm2, xmm4
+ por xmm2, xmm5
+ palignr xmm1, xmm0, 12 // xmm1 = { xmm3[0:7] xmm0[12:15]}
+ pshufb xmm0, xmm4
+ movdqu [edx + 32], xmm2
+ por xmm0, xmm5
+ pshufb xmm1, xmm4
+ movdqu [edx], xmm0
+ por xmm1, xmm5
+ palignr xmm3, xmm3, 4 // xmm3 = { xmm3[4:15]}
+ pshufb xmm3, xmm4
+ movdqu [edx + 16], xmm1
+ por xmm3, xmm5
+ movdqu [edx + 48], xmm3
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+// pmul method to replicate bits.
+// Math to replicate bits:
+// (v << 8) | (v << 3)
+// v * 256 + v * 8
+// v * (256 + 8)
+// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
+// 20 instructions.
+__declspec(naked)
+void RGB565ToARGBRow_SSE2(const uint8* src_rgb565, uint8* dst_argb,
+ int pix) {
+ __asm {
+ mov eax, 0x01080108 // generate multiplier to repeat 5 bits
+ movd xmm5, eax
+ pshufd xmm5, xmm5, 0
+ mov eax, 0x20802080 // multiplier shift by 5 and then repeat 6 bits
+ movd xmm6, eax
+ pshufd xmm6, xmm6, 0
+ pcmpeqb xmm3, xmm3 // generate mask 0xf800f800 for Red
+ psllw xmm3, 11
+ pcmpeqb xmm4, xmm4 // generate mask 0x07e007e0 for Green
+ psllw xmm4, 10
+ psrlw xmm4, 5
+ pcmpeqb xmm7, xmm7 // generate mask 0xff00ff00 for Alpha
+ psllw xmm7, 8
+
+ mov eax, [esp + 4] // src_rgb565
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ sub edx, eax
+ sub edx, eax
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 8 pixels of bgr565
+ movdqa xmm1, xmm0
+ movdqa xmm2, xmm0
+ pand xmm1, xmm3 // R in upper 5 bits
+ psllw xmm2, 11 // B in upper 5 bits
+ pmulhuw xmm1, xmm5 // * (256 + 8)
+ pmulhuw xmm2, xmm5 // * (256 + 8)
+ psllw xmm1, 8
+ por xmm1, xmm2 // RB
+ pand xmm0, xmm4 // G in middle 6 bits
+ pmulhuw xmm0, xmm6 // << 5 * (256 + 4)
+ por xmm0, xmm7 // AG
+ movdqa xmm2, xmm1
+ punpcklbw xmm1, xmm0
+ punpckhbw xmm2, xmm0
+ movdqu [eax * 2 + edx], xmm1 // store 4 pixels of ARGB
+ movdqu [eax * 2 + edx + 16], xmm2 // store next 4 pixels of ARGB
+ lea eax, [eax + 16]
+ sub ecx, 8
+ jg convertloop
+ ret
+ }
+}
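+
+// Scalar view of the pmul expansion above (illustrative helper): a 5-bit
+// field widens to 8 bits as (v << 3) | (v >> 2), which is exactly
+// v * (256 + 8) read from the high byte; the 6-bit green widens as
+// (v << 2) | (v >> 4).
+static void RGB565ToARGBPixel_Sketch(uint16 rgb565, uint8* dst_argb) {
+  int b = rgb565 & 0x1f;
+  int g = (rgb565 >> 5) & 0x3f;
+  int r = (rgb565 >> 11) & 0x1f;
+  dst_argb[0] = (uint8)((b << 3) | (b >> 2));  // B
+  dst_argb[1] = (uint8)((g << 2) | (g >> 4));  // G
+  dst_argb[2] = (uint8)((r << 3) | (r >> 2));  // R
+  dst_argb[3] = 255;                           // A
+}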
+
+#ifdef HAS_RGB565TOARGBROW_AVX2
+// pmul method to replicate bits.
+// Math to replicate bits:
+// (v << 8) | (v << 3)
+// v * 256 + v * 8
+// v * (256 + 8)
+// G shift of 5 is incorporated, so shift is 5 + 8 and 5 + 3
+__declspec(naked)
+void RGB565ToARGBRow_AVX2(const uint8* src_rgb565, uint8* dst_argb,
+ int pix) {
+ __asm {
+ mov eax, 0x01080108 // generate multiplier to repeat 5 bits
+ vmovd xmm5, eax
+ vbroadcastss ymm5, xmm5
+ mov eax, 0x20802080 // multiplier shift by 5 and then repeat 6 bits
+ movd xmm6, eax
+ vbroadcastss ymm6, xmm6
+ vpcmpeqb ymm3, ymm3, ymm3 // generate mask 0xf800f800 for Red
+ vpsllw ymm3, ymm3, 11
+ vpcmpeqb ymm4, ymm4, ymm4 // generate mask 0x07e007e0 for Green
+ vpsllw ymm4, ymm4, 10
+ vpsrlw ymm4, ymm4, 5
+ vpcmpeqb ymm7, ymm7, ymm7 // generate mask 0xff00ff00 for Alpha
+ vpsllw ymm7, ymm7, 8
+
+ mov eax, [esp + 4] // src_rgb565
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ sub edx, eax
+ sub edx, eax
+
+ convertloop:
+ vmovdqu ymm0, [eax] // fetch 16 pixels of bgr565
+ vpand ymm1, ymm0, ymm3 // R in upper 5 bits
+ vpsllw ymm2, ymm0, 11 // B in upper 5 bits
+ vpmulhuw ymm1, ymm1, ymm5 // * (256 + 8)
+ vpmulhuw ymm2, ymm2, ymm5 // * (256 + 8)
+ vpsllw ymm1, ymm1, 8
+ vpor ymm1, ymm1, ymm2 // RB
+ vpand ymm0, ymm0, ymm4 // G in middle 6 bits
+ vpmulhuw ymm0, ymm0, ymm6 // << 5 * (256 + 4)
+ vpor ymm0, ymm0, ymm7 // AG
+ vpermq ymm0, ymm0, 0xd8 // mutate for unpack
+ vpermq ymm1, ymm1, 0xd8
+ vpunpckhbw ymm2, ymm1, ymm0
+ vpunpcklbw ymm1, ymm1, ymm0
+    vmovdqu    [eax * 2 + edx], ymm1  // store 8 pixels of ARGB
+    vmovdqu    [eax * 2 + edx + 32], ymm2  // store next 8 pixels of ARGB
+ lea eax, [eax + 32]
+ sub ecx, 16
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_RGB565TOARGBROW_AVX2
+
+#ifdef HAS_ARGB1555TOARGBROW_AVX2
+__declspec(naked)
+void ARGB1555ToARGBRow_AVX2(const uint8* src_argb1555, uint8* dst_argb,
+ int pix) {
+ __asm {
+ mov eax, 0x01080108 // generate multiplier to repeat 5 bits
+ vmovd xmm5, eax
+ vbroadcastss ymm5, xmm5
+ mov eax, 0x42004200 // multiplier shift by 6 and then repeat 5 bits
+ movd xmm6, eax
+ vbroadcastss ymm6, xmm6
+ vpcmpeqb ymm3, ymm3, ymm3 // generate mask 0xf800f800 for Red
+ vpsllw ymm3, ymm3, 11
+ vpsrlw ymm4, ymm3, 6 // generate mask 0x03e003e0 for Green
+ vpcmpeqb ymm7, ymm7, ymm7 // generate mask 0xff00ff00 for Alpha
+ vpsllw ymm7, ymm7, 8
+
+ mov eax, [esp + 4] // src_argb1555
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ sub edx, eax
+ sub edx, eax
+
+ convertloop:
+ vmovdqu ymm0, [eax] // fetch 16 pixels of 1555
+ vpsllw ymm1, ymm0, 1 // R in upper 5 bits
+ vpsllw ymm2, ymm0, 11 // B in upper 5 bits
+ vpand ymm1, ymm1, ymm3
+ vpmulhuw ymm2, ymm2, ymm5 // * (256 + 8)
+ vpmulhuw ymm1, ymm1, ymm5 // * (256 + 8)
+ vpsllw ymm1, ymm1, 8
+ vpor ymm1, ymm1, ymm2 // RB
+ vpsraw ymm2, ymm0, 8 // A
+ vpand ymm0, ymm0, ymm4 // G in middle 5 bits
+ vpmulhuw ymm0, ymm0, ymm6 // << 6 * (256 + 8)
+ vpand ymm2, ymm2, ymm7
+ vpor ymm0, ymm0, ymm2 // AG
+ vpermq ymm0, ymm0, 0xd8 // mutate for unpack
+ vpermq ymm1, ymm1, 0xd8
+ vpunpckhbw ymm2, ymm1, ymm0
+ vpunpcklbw ymm1, ymm1, ymm0
+ vmovdqu [eax * 2 + edx], ymm1 // store 8 pixels of ARGB
+ vmovdqu [eax * 2 + edx + 32], ymm2 // store next 8 pixels of ARGB
+ lea eax, [eax + 32]
+ sub ecx, 16
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGB1555TOARGBROW_AVX2
+
+#ifdef HAS_ARGB4444TOARGBROW_AVX2
+__declspec(naked)
+void ARGB4444ToARGBRow_AVX2(const uint8* src_argb4444, uint8* dst_argb,
+ int pix) {
+ __asm {
+ mov eax, 0x0f0f0f0f // generate mask 0x0f0f0f0f
+ vmovd xmm4, eax
+ vbroadcastss ymm4, xmm4
+ vpslld ymm5, ymm4, 4 // 0xf0f0f0f0 for high nibbles
+ mov eax, [esp + 4] // src_argb4444
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ sub edx, eax
+ sub edx, eax
+
+ convertloop:
+ vmovdqu ymm0, [eax] // fetch 16 pixels of bgra4444
+ vpand ymm2, ymm0, ymm5 // mask high nibbles
+ vpand ymm0, ymm0, ymm4 // mask low nibbles
+ vpsrlw ymm3, ymm2, 4
+ vpsllw ymm1, ymm0, 4
+ vpor ymm2, ymm2, ymm3
+ vpor ymm0, ymm0, ymm1
+ vpermq ymm0, ymm0, 0xd8 // mutate for unpack
+ vpermq ymm2, ymm2, 0xd8
+ vpunpckhbw ymm1, ymm0, ymm2
+ vpunpcklbw ymm0, ymm0, ymm2
+ vmovdqu [eax * 2 + edx], ymm0 // store 8 pixels of ARGB
+ vmovdqu [eax * 2 + edx + 32], ymm1 // store next 8 pixels of ARGB
+ lea eax, [eax + 32]
+ sub ecx, 16
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGB4444TOARGBROW_AVX2
+
+// 24 instructions
+__declspec(naked)
+void ARGB1555ToARGBRow_SSE2(const uint8* src_argb1555, uint8* dst_argb,
+ int pix) {
+ __asm {
+ mov eax, 0x01080108 // generate multiplier to repeat 5 bits
+ movd xmm5, eax
+ pshufd xmm5, xmm5, 0
+ mov eax, 0x42004200 // multiplier shift by 6 and then repeat 5 bits
+ movd xmm6, eax
+ pshufd xmm6, xmm6, 0
+ pcmpeqb xmm3, xmm3 // generate mask 0xf800f800 for Red
+ psllw xmm3, 11
+ movdqa xmm4, xmm3 // generate mask 0x03e003e0 for Green
+ psrlw xmm4, 6
+ pcmpeqb xmm7, xmm7 // generate mask 0xff00ff00 for Alpha
+ psllw xmm7, 8
+
+ mov eax, [esp + 4] // src_argb1555
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ sub edx, eax
+ sub edx, eax
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 8 pixels of 1555
+ movdqa xmm1, xmm0
+ movdqa xmm2, xmm0
+ psllw xmm1, 1 // R in upper 5 bits
+ psllw xmm2, 11 // B in upper 5 bits
+ pand xmm1, xmm3
+ pmulhuw xmm2, xmm5 // * (256 + 8)
+ pmulhuw xmm1, xmm5 // * (256 + 8)
+ psllw xmm1, 8
+ por xmm1, xmm2 // RB
+ movdqa xmm2, xmm0
+ pand xmm0, xmm4 // G in middle 5 bits
+ psraw xmm2, 8 // A
+ pmulhuw xmm0, xmm6 // << 6 * (256 + 8)
+ pand xmm2, xmm7
+ por xmm0, xmm2 // AG
+ movdqa xmm2, xmm1
+ punpcklbw xmm1, xmm0
+ punpckhbw xmm2, xmm0
+ movdqu [eax * 2 + edx], xmm1 // store 4 pixels of ARGB
+ movdqu [eax * 2 + edx + 16], xmm2 // store next 4 pixels of ARGB
+ lea eax, [eax + 16]
+ sub ecx, 8
+ jg convertloop
+ ret
+ }
+}
+
+// 18 instructions.
+__declspec(naked)
+void ARGB4444ToARGBRow_SSE2(const uint8* src_argb4444, uint8* dst_argb,
+ int pix) {
+ __asm {
+ mov eax, 0x0f0f0f0f // generate mask 0x0f0f0f0f
+ movd xmm4, eax
+ pshufd xmm4, xmm4, 0
+ movdqa xmm5, xmm4 // 0xf0f0f0f0 for high nibbles
+ pslld xmm5, 4
+ mov eax, [esp + 4] // src_argb4444
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // pix
+ sub edx, eax
+ sub edx, eax
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 8 pixels of bgra4444
+ movdqa xmm2, xmm0
+ pand xmm0, xmm4 // mask low nibbles
+ pand xmm2, xmm5 // mask high nibbles
+ movdqa xmm1, xmm0
+ movdqa xmm3, xmm2
+ psllw xmm1, 4
+ psrlw xmm3, 4
+ por xmm0, xmm1
+ por xmm2, xmm3
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm2
+ punpckhbw xmm1, xmm2
+ movdqu [eax * 2 + edx], xmm0 // store 4 pixels of ARGB
+ movdqu [eax * 2 + edx + 16], xmm1 // store next 4 pixels of ARGB
+ lea eax, [eax + 16]
+ sub ecx, 8
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void ARGBToRGB24Row_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ mov ecx, [esp + 12] // pix
+ movdqa xmm6, kShuffleMaskARGBToRGB24
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 16 pixels of argb
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ lea eax, [eax + 64]
+ pshufb xmm0, xmm6 // pack 16 bytes of ARGB to 12 bytes of RGB
+ pshufb xmm1, xmm6
+ pshufb xmm2, xmm6
+ pshufb xmm3, xmm6
+ movdqa xmm4, xmm1 // 4 bytes from 1 for 0
+ psrldq xmm1, 4 // 8 bytes from 1
+ pslldq xmm4, 12 // 4 bytes from 1 for 0
+ movdqa xmm5, xmm2 // 8 bytes from 2 for 1
+ por xmm0, xmm4 // 4 bytes from 1 for 0
+ pslldq xmm5, 8 // 8 bytes from 2 for 1
+ movdqu [edx], xmm0 // store 0
+ por xmm1, xmm5 // 8 bytes from 2 for 1
+ psrldq xmm2, 8 // 4 bytes from 2
+ pslldq xmm3, 4 // 12 bytes from 3 for 2
+ por xmm2, xmm3 // 12 bytes from 3 for 2
+ movdqu [edx + 16], xmm1 // store 1
+ movdqu [edx + 32], xmm2 // store 2
+ lea edx, [edx + 48]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void ARGBToRAWRow_SSSE3(const uint8* src_argb, uint8* dst_rgb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ mov ecx, [esp + 12] // pix
+ movdqa xmm6, kShuffleMaskARGBToRAW
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 16 pixels of argb
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ lea eax, [eax + 64]
+ pshufb xmm0, xmm6 // pack 16 bytes of ARGB to 12 bytes of RGB
+ pshufb xmm1, xmm6
+ pshufb xmm2, xmm6
+ pshufb xmm3, xmm6
+ movdqa xmm4, xmm1 // 4 bytes from 1 for 0
+ psrldq xmm1, 4 // 8 bytes from 1
+ pslldq xmm4, 12 // 4 bytes from 1 for 0
+ movdqa xmm5, xmm2 // 8 bytes from 2 for 1
+ por xmm0, xmm4 // 4 bytes from 1 for 0
+ pslldq xmm5, 8 // 8 bytes from 2 for 1
+ movdqu [edx], xmm0 // store 0
+ por xmm1, xmm5 // 8 bytes from 2 for 1
+ psrldq xmm2, 8 // 4 bytes from 2
+ pslldq xmm3, 4 // 12 bytes from 3 for 2
+ por xmm2, xmm3 // 12 bytes from 3 for 2
+ movdqu [edx + 16], xmm1 // store 1
+ movdqu [edx + 32], xmm2 // store 2
+ lea edx, [edx + 48]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+// 4 pixels
+__declspec(naked)
+void ARGBToRGB565Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ mov ecx, [esp + 12] // pix
+ pcmpeqb xmm3, xmm3 // generate mask 0x0000001f
+ psrld xmm3, 27
+ pcmpeqb xmm4, xmm4 // generate mask 0x000007e0
+ psrld xmm4, 26
+ pslld xmm4, 5
+ pcmpeqb xmm5, xmm5 // generate mask 0xfffff800
+ pslld xmm5, 11
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 4 pixels of argb
+ movdqa xmm1, xmm0 // B
+ movdqa xmm2, xmm0 // G
+ pslld xmm0, 8 // R
+ psrld xmm1, 3 // B
+ psrld xmm2, 5 // G
+ psrad xmm0, 16 // R
+ pand xmm1, xmm3 // B
+ pand xmm2, xmm4 // G
+ pand xmm0, xmm5 // R
+ por xmm1, xmm2 // BG
+ por xmm0, xmm1 // BGR
+ packssdw xmm0, xmm0
+ lea eax, [eax + 16]
+ movq qword ptr [edx], xmm0 // store 4 pixels of RGB565
+ lea edx, [edx + 8]
+ sub ecx, 4
+ jg convertloop
+ ret
+ }
+}
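+
+// Scalar form of the pack above (illustrative helper): truncate each
+// channel and place B in bits 0-4, G in bits 5-10 and R in bits 11-15.
+static uint16 ARGBToRGB565Pixel_Sketch(const uint8* argb) {
+  return (uint16)((argb[0] >> 3) | ((argb[1] >> 2) << 5) |
+                  ((argb[2] >> 3) << 11));
+}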
+
+// 8 pixels
+__declspec(naked)
+void ARGBToRGB565DitherRow_SSE2(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix) {
+ __asm {
+
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ movd xmm6, [esp + 12] // dither4
+ mov ecx, [esp + 16] // pix
+ punpcklbw xmm6, xmm6 // make dither 16 bytes
+ movdqa xmm7, xmm6
+ punpcklwd xmm6, xmm6
+ punpckhwd xmm7, xmm7
+ pcmpeqb xmm3, xmm3 // generate mask 0x0000001f
+ psrld xmm3, 27
+ pcmpeqb xmm4, xmm4 // generate mask 0x000007e0
+ psrld xmm4, 26
+ pslld xmm4, 5
+ pcmpeqb xmm5, xmm5 // generate mask 0xfffff800
+ pslld xmm5, 11
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 4 pixels of argb
+ paddusb xmm0, xmm6 // add dither
+ movdqa xmm1, xmm0 // B
+ movdqa xmm2, xmm0 // G
+ pslld xmm0, 8 // R
+ psrld xmm1, 3 // B
+ psrld xmm2, 5 // G
+ psrad xmm0, 16 // R
+ pand xmm1, xmm3 // B
+ pand xmm2, xmm4 // G
+ pand xmm0, xmm5 // R
+ por xmm1, xmm2 // BG
+ por xmm0, xmm1 // BGR
+ packssdw xmm0, xmm0
+ lea eax, [eax + 16]
+ movq qword ptr [edx], xmm0 // store 4 pixels of RGB565
+ lea edx, [edx + 8]
+ sub ecx, 4
+ jg convertloop
+ ret
+ }
+}
+
+#ifdef HAS_ARGBTORGB565DITHERROW_AVX2
+__declspec(naked)
+void ARGBToRGB565DitherRow_AVX2(const uint8* src_argb, uint8* dst_rgb,
+ const uint32 dither4, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ vbroadcastss xmm6, [esp + 12] // dither4
+ mov ecx, [esp + 16] // pix
+ vpunpcklbw xmm6, xmm6, xmm6 // make dither 32 bytes
+ vpermq ymm6, ymm6, 0xd8
+ vpunpcklwd ymm6, ymm6, ymm6
+ vpcmpeqb ymm3, ymm3, ymm3 // generate mask 0x0000001f
+ vpsrld ymm3, ymm3, 27
+ vpcmpeqb ymm4, ymm4, ymm4 // generate mask 0x000007e0
+ vpsrld ymm4, ymm4, 26
+ vpslld ymm4, ymm4, 5
+ vpslld ymm5, ymm3, 11 // generate mask 0x0000f800
+
+ convertloop:
+ vmovdqu ymm0, [eax] // fetch 8 pixels of argb
+ vpaddusb ymm0, ymm0, ymm6 // add dither
+ vpsrld ymm2, ymm0, 5 // G
+ vpsrld ymm1, ymm0, 3 // B
+ vpsrld ymm0, ymm0, 8 // R
+ vpand ymm2, ymm2, ymm4 // G
+ vpand ymm1, ymm1, ymm3 // B
+ vpand ymm0, ymm0, ymm5 // R
+ vpor ymm1, ymm1, ymm2 // BG
+ vpor ymm0, ymm0, ymm1 // BGR
+ vpackusdw ymm0, ymm0, ymm0
+ vpermq ymm0, ymm0, 0xd8
+ lea eax, [eax + 32]
+ vmovdqu [edx], xmm0 // store 8 pixels of RGB565
+ lea edx, [edx + 16]
+ sub ecx, 8
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBTORGB565DITHERROW_AVX2
+
+// TODO(fbarchard): Improve sign extension/packing.
+__declspec(naked)
+void ARGBToARGB1555Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ mov ecx, [esp + 12] // pix
+ pcmpeqb xmm4, xmm4 // generate mask 0x0000001f
+ psrld xmm4, 27
+ movdqa xmm5, xmm4 // generate mask 0x000003e0
+ pslld xmm5, 5
+ movdqa xmm6, xmm4 // generate mask 0x00007c00
+ pslld xmm6, 10
+ pcmpeqb xmm7, xmm7 // generate mask 0xffff8000
+ pslld xmm7, 15
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 4 pixels of argb
+ movdqa xmm1, xmm0 // B
+ movdqa xmm2, xmm0 // G
+ movdqa xmm3, xmm0 // R
+ psrad xmm0, 16 // A
+ psrld xmm1, 3 // B
+ psrld xmm2, 6 // G
+ psrld xmm3, 9 // R
+ pand xmm0, xmm7 // A
+ pand xmm1, xmm4 // B
+ pand xmm2, xmm5 // G
+ pand xmm3, xmm6 // R
+ por xmm0, xmm1 // BA
+ por xmm2, xmm3 // GR
+ por xmm0, xmm2 // BGRA
+ packssdw xmm0, xmm0
+ lea eax, [eax + 16]
+ movq qword ptr [edx], xmm0 // store 4 pixels of ARGB1555
+ lea edx, [edx + 8]
+ sub ecx, 4
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void ARGBToARGB4444Row_SSE2(const uint8* src_argb, uint8* dst_rgb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ mov ecx, [esp + 12] // pix
+ pcmpeqb xmm4, xmm4 // generate mask 0xf000f000
+ psllw xmm4, 12
+ movdqa xmm3, xmm4 // generate mask 0x00f000f0
+ psrlw xmm3, 8
+
+ convertloop:
+ movdqu xmm0, [eax] // fetch 4 pixels of argb
+ movdqa xmm1, xmm0
+ pand xmm0, xmm3 // low nibble
+ pand xmm1, xmm4 // high nibble
+ psrld xmm0, 4
+ psrld xmm1, 8
+ por xmm0, xmm1
+ packuswb xmm0, xmm0
+ lea eax, [eax + 16]
+ movq qword ptr [edx], xmm0 // store 4 pixels of ARGB4444
+ lea edx, [edx + 8]
+ sub ecx, 4
+ jg convertloop
+ ret
+ }
+}
+
+#ifdef HAS_ARGBTORGB565ROW_AVX2
+__declspec(naked)
+void ARGBToRGB565Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ mov ecx, [esp + 12] // pix
+ vpcmpeqb ymm3, ymm3, ymm3 // generate mask 0x0000001f
+ vpsrld ymm3, ymm3, 27
+ vpcmpeqb ymm4, ymm4, ymm4 // generate mask 0x000007e0
+ vpsrld ymm4, ymm4, 26
+ vpslld ymm4, ymm4, 5
+ vpslld ymm5, ymm3, 11 // generate mask 0x0000f800
+
+ convertloop:
+ vmovdqu ymm0, [eax] // fetch 8 pixels of argb
+ vpsrld ymm2, ymm0, 5 // G
+ vpsrld ymm1, ymm0, 3 // B
+ vpsrld ymm0, ymm0, 8 // R
+ vpand ymm2, ymm2, ymm4 // G
+ vpand ymm1, ymm1, ymm3 // B
+ vpand ymm0, ymm0, ymm5 // R
+ vpor ymm1, ymm1, ymm2 // BG
+ vpor ymm0, ymm0, ymm1 // BGR
+ vpackusdw ymm0, ymm0, ymm0
+ vpermq ymm0, ymm0, 0xd8
+ lea eax, [eax + 32]
+ vmovdqu [edx], xmm0 // store 8 pixels of RGB565
+ lea edx, [edx + 16]
+ sub ecx, 8
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBTORGB565ROW_AVX2
+
+#ifdef HAS_ARGBTOARGB1555ROW_AVX2
+__declspec(naked)
+void ARGBToARGB1555Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ mov ecx, [esp + 12] // pix
+ vpcmpeqb ymm4, ymm4, ymm4
+ vpsrld ymm4, ymm4, 27 // generate mask 0x0000001f
+ vpslld ymm5, ymm4, 5 // generate mask 0x000003e0
+ vpslld ymm6, ymm4, 10 // generate mask 0x00007c00
+ vpcmpeqb ymm7, ymm7, ymm7 // generate mask 0xffff8000
+ vpslld ymm7, ymm7, 15
+
+ convertloop:
+ vmovdqu ymm0, [eax] // fetch 8 pixels of argb
+ vpsrld ymm3, ymm0, 9 // R
+ vpsrld ymm2, ymm0, 6 // G
+ vpsrld ymm1, ymm0, 3 // B
+ vpsrad ymm0, ymm0, 16 // A
+ vpand ymm3, ymm3, ymm6 // R
+ vpand ymm2, ymm2, ymm5 // G
+ vpand ymm1, ymm1, ymm4 // B
+ vpand ymm0, ymm0, ymm7 // A
+ vpor ymm0, ymm0, ymm1 // BA
+ vpor ymm2, ymm2, ymm3 // GR
+ vpor ymm0, ymm0, ymm2 // BGRA
+ vpackssdw ymm0, ymm0, ymm0
+ vpermq ymm0, ymm0, 0xd8
+ lea eax, [eax + 32]
+ vmovdqu [edx], xmm0 // store 8 pixels of ARGB1555
+ lea edx, [edx + 16]
+ sub ecx, 8
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBTOARGB1555ROW_AVX2
+
+#ifdef HAS_ARGBTOARGB4444ROW_AVX2
+__declspec(naked)
+void ARGBToARGB4444Row_AVX2(const uint8* src_argb, uint8* dst_rgb, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_rgb
+ mov ecx, [esp + 12] // pix
+ vpcmpeqb ymm4, ymm4, ymm4 // generate mask 0xf000f000
+ vpsllw ymm4, ymm4, 12
+ vpsrlw ymm3, ymm4, 8 // generate mask 0x00f000f0
+
+ convertloop:
+ vmovdqu ymm0, [eax] // fetch 8 pixels of argb
+ vpand ymm1, ymm0, ymm4 // high nibble
+ vpand ymm0, ymm0, ymm3 // low nibble
+ vpsrld ymm1, ymm1, 8
+ vpsrld ymm0, ymm0, 4
+ vpor ymm0, ymm0, ymm1
+ vpackuswb ymm0, ymm0, ymm0
+ vpermq ymm0, ymm0, 0xd8
+ lea eax, [eax + 32]
+ vmovdqu [edx], xmm0 // store 8 pixels of ARGB4444
+ lea edx, [edx + 16]
+ sub ecx, 8
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBTOARGB4444ROW_AVX2
+
+// Convert 16 ARGB pixels (64 bytes) to 16 Y values.
+__declspec(naked)
+void ARGBToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_y */
+ mov ecx, [esp + 12] /* pix */
+ movdqa xmm4, kARGBToY
+ movdqa xmm5, kAddY16
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ pmaddubsw xmm0, xmm4
+ pmaddubsw xmm1, xmm4
+ pmaddubsw xmm2, xmm4
+ pmaddubsw xmm3, xmm4
+ lea eax, [eax + 64]
+ phaddw xmm0, xmm1
+ phaddw xmm2, xmm3
+ psrlw xmm0, 7
+ psrlw xmm2, 7
+ packuswb xmm0, xmm2
+ paddb xmm0, xmm5
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
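+
+// Scalar equivalent of the row above (illustrative helper): BT.601
+// studio-swing luma from the B, G, R bytes of each ARGB pixel; the sum
+// never exceeds 255 after the shift and the +16 bias.
+static void ARGBToYRow_C_Sketch(const uint8* src_argb, uint8* dst_y,
+                                int pix) {
+  int x;
+  for (x = 0; x < pix; ++x) {
+    const uint8* p = src_argb + x * 4;  // bytes are B, G, R, A.
+    dst_y[x] = (uint8)(((13 * p[0] + 65 * p[1] + 33 * p[2]) >> 7) + 16);
+  }
+}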
+
+// Convert 16 ARGB pixels (64 bytes) to 16 YJ values.
+// Same as ARGBToYRow but with different coefficients, no add of 16, and rounding.
+__declspec(naked)
+void ARGBToYJRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_y */
+ mov ecx, [esp + 12] /* pix */
+ movdqa xmm4, kARGBToYJ
+ movdqa xmm5, kAddYJ64
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ pmaddubsw xmm0, xmm4
+ pmaddubsw xmm1, xmm4
+ pmaddubsw xmm2, xmm4
+ pmaddubsw xmm3, xmm4
+ lea eax, [eax + 64]
+ phaddw xmm0, xmm1
+ phaddw xmm2, xmm3
+ paddw xmm0, xmm5 // Add .5 for rounding.
+ paddw xmm2, xmm5
+ psrlw xmm0, 7
+ psrlw xmm2, 7
+ packuswb xmm0, xmm2
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+#ifdef HAS_ARGBTOYROW_AVX2
+// vpermd to undo the mutation from vphaddw + vpackuswb.
+static const lvec32 kPermdARGBToY_AVX = {
+ 0, 4, 1, 5, 2, 6, 3, 7
+};
+
+// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
+__declspec(naked)
+void ARGBToYRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_y */
+ mov ecx, [esp + 12] /* pix */
+ vbroadcastf128 ymm4, kARGBToY
+ vbroadcastf128 ymm5, kAddY16
+ vmovdqu ymm6, kPermdARGBToY_AVX
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ vmovdqu ymm2, [eax + 64]
+ vmovdqu ymm3, [eax + 96]
+ vpmaddubsw ymm0, ymm0, ymm4
+ vpmaddubsw ymm1, ymm1, ymm4
+ vpmaddubsw ymm2, ymm2, ymm4
+ vpmaddubsw ymm3, ymm3, ymm4
+ lea eax, [eax + 128]
+ vphaddw ymm0, ymm0, ymm1 // mutates.
+ vphaddw ymm2, ymm2, ymm3
+ vpsrlw ymm0, ymm0, 7
+ vpsrlw ymm2, ymm2, 7
+ vpackuswb ymm0, ymm0, ymm2 // mutates.
+ vpermd ymm0, ymm6, ymm0 // For vphaddw + vpackuswb mutation.
+ vpaddb ymm0, ymm0, ymm5 // add 16 for Y
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBTOYROW_AVX2
+
+#ifdef HAS_ARGBTOYJROW_AVX2
+// Convert 32 ARGB pixels (128 bytes) to 32 Y values.
+__declspec(naked)
+void ARGBToYJRow_AVX2(const uint8* src_argb, uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_y */
+ mov ecx, [esp + 12] /* pix */
+ vbroadcastf128 ymm4, kARGBToYJ
+ vbroadcastf128 ymm5, kAddYJ64
+ vmovdqu ymm6, kPermdARGBToY_AVX
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ vmovdqu ymm2, [eax + 64]
+ vmovdqu ymm3, [eax + 96]
+ vpmaddubsw ymm0, ymm0, ymm4
+ vpmaddubsw ymm1, ymm1, ymm4
+ vpmaddubsw ymm2, ymm2, ymm4
+ vpmaddubsw ymm3, ymm3, ymm4
+ lea eax, [eax + 128]
+ vphaddw ymm0, ymm0, ymm1 // mutates.
+ vphaddw ymm2, ymm2, ymm3
+ vpaddw ymm0, ymm0, ymm5 // Add .5 for rounding.
+ vpaddw ymm2, ymm2, ymm5
+ vpsrlw ymm0, ymm0, 7
+ vpsrlw ymm2, ymm2, 7
+ vpackuswb ymm0, ymm0, ymm2 // mutates.
+ vpermd ymm0, ymm6, ymm0 // For vphaddw + vpackuswb mutation.
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg convertloop
+
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBTOYJROW_AVX2
+
+__declspec(naked)
+void BGRAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_y */
+ mov ecx, [esp + 12] /* pix */
+ movdqa xmm4, kBGRAToY
+ movdqa xmm5, kAddY16
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ pmaddubsw xmm0, xmm4
+ pmaddubsw xmm1, xmm4
+ pmaddubsw xmm2, xmm4
+ pmaddubsw xmm3, xmm4
+ lea eax, [eax + 64]
+ phaddw xmm0, xmm1
+ phaddw xmm2, xmm3
+ psrlw xmm0, 7
+ psrlw xmm2, 7
+ packuswb xmm0, xmm2
+ paddb xmm0, xmm5
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void ABGRToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_y */
+ mov ecx, [esp + 12] /* pix */
+ movdqa xmm4, kABGRToY
+ movdqa xmm5, kAddY16
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ pmaddubsw xmm0, xmm4
+ pmaddubsw xmm1, xmm4
+ pmaddubsw xmm2, xmm4
+ pmaddubsw xmm3, xmm4
+ lea eax, [eax + 64]
+ phaddw xmm0, xmm1
+ phaddw xmm2, xmm3
+ psrlw xmm0, 7
+ psrlw xmm2, 7
+ packuswb xmm0, xmm2
+ paddb xmm0, xmm5
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void RGBAToYRow_SSSE3(const uint8* src_argb, uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_y */
+ mov ecx, [esp + 12] /* pix */
+ movdqa xmm4, kRGBAToY
+ movdqa xmm5, kAddY16
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ pmaddubsw xmm0, xmm4
+ pmaddubsw xmm1, xmm4
+ pmaddubsw xmm2, xmm4
+ pmaddubsw xmm3, xmm4
+ lea eax, [eax + 64]
+ phaddw xmm0, xmm1
+ phaddw xmm2, xmm3
+ psrlw xmm0, 7
+ psrlw xmm2, 7
+ packuswb xmm0, xmm2
+ paddb xmm0, xmm5
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void ARGBToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov esi, [esp + 8 + 8] // src_stride_argb
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // width
+ movdqa xmm5, kAddUV128
+ movdqa xmm6, kARGBToV
+ movdqa xmm7, kARGBToU
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* step 1 - subsample 16x2 argb pixels to 8x1 */
+ movdqu xmm0, [eax]
+ movdqu xmm4, [eax + esi]
+ pavgb xmm0, xmm4
+ movdqu xmm1, [eax + 16]
+ movdqu xmm4, [eax + esi + 16]
+ pavgb xmm1, xmm4
+ movdqu xmm2, [eax + 32]
+ movdqu xmm4, [eax + esi + 32]
+ pavgb xmm2, xmm4
+ movdqu xmm3, [eax + 48]
+ movdqu xmm4, [eax + esi + 48]
+ pavgb xmm3, xmm4
+
+ lea eax, [eax + 64]
+ movdqa xmm4, xmm0
+ shufps xmm0, xmm1, 0x88
+ shufps xmm4, xmm1, 0xdd
+ pavgb xmm0, xmm4
+ movdqa xmm4, xmm2
+ shufps xmm2, xmm3, 0x88
+ shufps xmm4, xmm3, 0xdd
+ pavgb xmm2, xmm4
+
+ // step 2 - convert to U and V
+ // from here down is very similar to Y code except
+ // instead of 16 different pixels, it's 8 pixels of U and 8 of V
+ movdqa xmm1, xmm0
+ movdqa xmm3, xmm2
+ pmaddubsw xmm0, xmm7 // U
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm6 // V
+ pmaddubsw xmm3, xmm6
+ phaddw xmm0, xmm2
+ phaddw xmm1, xmm3
+ psraw xmm0, 8
+ psraw xmm1, 8
+ packsswb xmm0, xmm1
+ paddb xmm0, xmm5 // -> unsigned
+
+ // step 3 - store 8 U and 8 V values
+ movlps qword ptr [edx], xmm0 // U
+ movhps qword ptr [edx + edi], xmm0 // V
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
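+// Scalar sketch of the UV rows (illustrative; helper names are hypothetical,
+// and the weights are assumed to match kARGBToU/kARGBToV). Each output U/V
+// is taken from a 2x2 box average of the ARGB source, then a dot product
+// biased by 0x8080 (+128 chroma offset plus rounding).
+static __inline int RGBToU_Sketch(uint8 r, uint8 g, uint8 b) {
+  return (112 * b - 74 * g - 38 * r + 0x8080) >> 8;
+}
+static __inline int RGBToV_Sketch(uint8 r, uint8 g, uint8 b) {
+  return (112 * r - 94 * g - 18 * b + 0x8080) >> 8;
+}
+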
+__declspec(naked)
+void ARGBToUVJRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov esi, [esp + 8 + 8] // src_stride_argb
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // width
+ movdqa xmm5, kAddUVJ128
+ movdqa xmm6, kARGBToVJ
+ movdqa xmm7, kARGBToUJ
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* step 1 - subsample 16x2 argb pixels to 8x1 */
+ movdqu xmm0, [eax]
+ movdqu xmm4, [eax + esi]
+ pavgb xmm0, xmm4
+ movdqu xmm1, [eax + 16]
+ movdqu xmm4, [eax + esi + 16]
+ pavgb xmm1, xmm4
+ movdqu xmm2, [eax + 32]
+ movdqu xmm4, [eax + esi + 32]
+ pavgb xmm2, xmm4
+ movdqu xmm3, [eax + 48]
+ movdqu xmm4, [eax + esi + 48]
+ pavgb xmm3, xmm4
+
+ lea eax, [eax + 64]
+ movdqa xmm4, xmm0
+ shufps xmm0, xmm1, 0x88
+ shufps xmm4, xmm1, 0xdd
+ pavgb xmm0, xmm4
+ movdqa xmm4, xmm2
+ shufps xmm2, xmm3, 0x88
+ shufps xmm4, xmm3, 0xdd
+ pavgb xmm2, xmm4
+
+ // step 2 - convert to U and V
+ // from here down is very similar to Y code except
+ // instead of 16 different pixels, it's 8 pixels of U and 8 of V
+ movdqa xmm1, xmm0
+ movdqa xmm3, xmm2
+ pmaddubsw xmm0, xmm7 // U
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm6 // V
+ pmaddubsw xmm3, xmm6
+ phaddw xmm0, xmm2
+ phaddw xmm1, xmm3
+ paddw xmm0, xmm5 // +.5 rounding -> unsigned
+ paddw xmm1, xmm5
+ psraw xmm0, 8
+ psraw xmm1, 8
+ packsswb xmm0, xmm1
+
+ // step 3 - store 8 U and 8 V values
+ movlps qword ptr [edx], xmm0 // U
+ movhps qword ptr [edx + edi], xmm0 // V
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+#ifdef HAS_ARGBTOUVROW_AVX2
+__declspec(naked)
+void ARGBToUVRow_AVX2(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov esi, [esp + 8 + 8] // src_stride_argb
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // width
+ vbroadcastf128 ymm5, kAddUV128
+ vbroadcastf128 ymm6, kARGBToV
+ vbroadcastf128 ymm7, kARGBToU
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* step 1 - subsample 32x2 argb pixels to 16x1 */
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ vmovdqu ymm2, [eax + 64]
+ vmovdqu ymm3, [eax + 96]
+ vpavgb ymm0, ymm0, [eax + esi]
+ vpavgb ymm1, ymm1, [eax + esi + 32]
+ vpavgb ymm2, ymm2, [eax + esi + 64]
+ vpavgb ymm3, ymm3, [eax + esi + 96]
+ lea eax, [eax + 128]
+ vshufps ymm4, ymm0, ymm1, 0x88
+ vshufps ymm0, ymm0, ymm1, 0xdd
+ vpavgb ymm0, ymm0, ymm4 // mutated by vshufps
+ vshufps ymm4, ymm2, ymm3, 0x88
+ vshufps ymm2, ymm2, ymm3, 0xdd
+ vpavgb ymm2, ymm2, ymm4 // mutated by vshufps
+
+ // step 2 - convert to U and V
+ // from here down is very similar to Y code except
+ // instead of 32 different pixels, it's 16 pixels of U and 16 of V
+ vpmaddubsw ymm1, ymm0, ymm7 // U
+ vpmaddubsw ymm3, ymm2, ymm7
+ vpmaddubsw ymm0, ymm0, ymm6 // V
+ vpmaddubsw ymm2, ymm2, ymm6
+ vphaddw ymm1, ymm1, ymm3 // mutates
+ vphaddw ymm0, ymm0, ymm2
+ vpsraw ymm1, ymm1, 8
+ vpsraw ymm0, ymm0, 8
+ vpacksswb ymm0, ymm1, ymm0 // mutates
+ vpermq ymm0, ymm0, 0xd8 // For vpacksswb
+ vpshufb ymm0, ymm0, kShufARGBToUV_AVX // For vshufps + vphaddw
+ vpaddb ymm0, ymm0, ymm5 // -> unsigned
+
+ // step 3 - store 16 U and 16 V values
+ vextractf128 [edx], ymm0, 0 // U
+ vextractf128 [edx + edi], ymm0, 1 // V
+ lea edx, [edx + 16]
+ sub ecx, 32
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBTOUVROW_AVX2
+
+__declspec(naked)
+void ARGBToUV444Row_SSSE3(const uint8* src_argb0,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_argb
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // width
+ movdqa xmm5, kAddUV128
+ movdqa xmm6, kARGBToV
+ movdqa xmm7, kARGBToU
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* convert to U and V */
+ movdqu xmm0, [eax] // U
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ pmaddubsw xmm0, xmm7
+ pmaddubsw xmm1, xmm7
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm3, xmm7
+ phaddw xmm0, xmm1
+ phaddw xmm2, xmm3
+ psraw xmm0, 8
+ psraw xmm2, 8
+ packsswb xmm0, xmm2
+ paddb xmm0, xmm5
+ movdqu [edx], xmm0
+
+ movdqu xmm0, [eax] // V
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ pmaddubsw xmm0, xmm6
+ pmaddubsw xmm1, xmm6
+ pmaddubsw xmm2, xmm6
+ pmaddubsw xmm3, xmm6
+ phaddw xmm0, xmm1
+ phaddw xmm2, xmm3
+ psraw xmm0, 8
+ psraw xmm2, 8
+ packsswb xmm0, xmm2
+ paddb xmm0, xmm5
+ lea eax, [eax + 64]
+ movdqu [edx + edi], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ ret
+ }
+}
+
+__declspec(naked)
+void ARGBToUV422Row_SSSE3(const uint8* src_argb0,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_argb
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // width
+ movdqa xmm5, kAddUV128
+ movdqa xmm6, kARGBToV
+ movdqa xmm7, kARGBToU
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* step 1 - subsample 16x2 argb pixels to 8x1 */
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+ lea eax, [eax + 64]
+ movdqa xmm4, xmm0
+ shufps xmm0, xmm1, 0x88
+ shufps xmm4, xmm1, 0xdd
+ pavgb xmm0, xmm4
+ movdqa xmm4, xmm2
+ shufps xmm2, xmm3, 0x88
+ shufps xmm4, xmm3, 0xdd
+ pavgb xmm2, xmm4
+
+ // step 2 - convert to U and V
+ // from here down is very similar to Y code except
+ // instead of 16 different pixels, it's 8 pixels of U and 8 of V
+ movdqa xmm1, xmm0
+ movdqa xmm3, xmm2
+ pmaddubsw xmm0, xmm7 // U
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm6 // V
+ pmaddubsw xmm3, xmm6
+ phaddw xmm0, xmm2
+ phaddw xmm1, xmm3
+ psraw xmm0, 8
+ psraw xmm1, 8
+ packsswb xmm0, xmm1
+ paddb xmm0, xmm5 // -> unsigned
+
+ // step 3 - store 8 U and 8 V values
+ movlps qword ptr [edx], xmm0 // U
+ movhps qword ptr [edx + edi], xmm0 // V
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ ret
+ }
+}
+
+__declspec(naked)
+void BGRAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov esi, [esp + 8 + 8] // src_stride_argb
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // width
+ movdqa xmm5, kAddUV128
+ movdqa xmm6, kBGRAToV
+ movdqa xmm7, kBGRAToU
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* step 1 - subsample 16x2 argb pixels to 8x1 */
+ movdqu xmm0, [eax]
+ movdqu xmm4, [eax + esi]
+ pavgb xmm0, xmm4
+ movdqu xmm1, [eax + 16]
+ movdqu xmm4, [eax + esi + 16]
+ pavgb xmm1, xmm4
+ movdqu xmm2, [eax + 32]
+ movdqu xmm4, [eax + esi + 32]
+ pavgb xmm2, xmm4
+ movdqu xmm3, [eax + 48]
+ movdqu xmm4, [eax + esi + 48]
+ pavgb xmm3, xmm4
+
+ lea eax, [eax + 64]
+ movdqa xmm4, xmm0
+ shufps xmm0, xmm1, 0x88
+ shufps xmm4, xmm1, 0xdd
+ pavgb xmm0, xmm4
+ movdqa xmm4, xmm2
+ shufps xmm2, xmm3, 0x88
+ shufps xmm4, xmm3, 0xdd
+ pavgb xmm2, xmm4
+
+ // step 2 - convert to U and V
+ // from here down is very similar to Y code except
+ // instead of 16 different pixels, it's 8 pixels of U and 8 of V
+ movdqa xmm1, xmm0
+ movdqa xmm3, xmm2
+ pmaddubsw xmm0, xmm7 // U
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm6 // V
+ pmaddubsw xmm3, xmm6
+ phaddw xmm0, xmm2
+ phaddw xmm1, xmm3
+ psraw xmm0, 8
+ psraw xmm1, 8
+ packsswb xmm0, xmm1
+ paddb xmm0, xmm5 // -> unsigned
+
+ // step 3 - store 8 U and 8 V values
+ movlps qword ptr [edx], xmm0 // U
+ movhps qword ptr [edx + edi], xmm0 // V
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+__declspec(naked)
+void ABGRToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov esi, [esp + 8 + 8] // src_stride_argb
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // width
+ movdqa xmm5, kAddUV128
+ movdqa xmm6, kABGRToV
+ movdqa xmm7, kABGRToU
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* step 1 - subsample 16x2 argb pixels to 8x1 */
+ movdqu xmm0, [eax]
+ movdqu xmm4, [eax + esi]
+ pavgb xmm0, xmm4
+ movdqu xmm1, [eax + 16]
+ movdqu xmm4, [eax + esi + 16]
+ pavgb xmm1, xmm4
+ movdqu xmm2, [eax + 32]
+ movdqu xmm4, [eax + esi + 32]
+ pavgb xmm2, xmm4
+ movdqu xmm3, [eax + 48]
+ movdqu xmm4, [eax + esi + 48]
+ pavgb xmm3, xmm4
+
+ lea eax, [eax + 64]
+ movdqa xmm4, xmm0
+ shufps xmm0, xmm1, 0x88
+ shufps xmm4, xmm1, 0xdd
+ pavgb xmm0, xmm4
+ movdqa xmm4, xmm2
+ shufps xmm2, xmm3, 0x88
+ shufps xmm4, xmm3, 0xdd
+ pavgb xmm2, xmm4
+
+ // step 2 - convert to U and V
+ // from here down is very similar to Y code except
+ // instead of 16 different pixels, it's 8 pixels of U and 8 of V
+ movdqa xmm1, xmm0
+ movdqa xmm3, xmm2
+ pmaddubsw xmm0, xmm7 // U
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm6 // V
+ pmaddubsw xmm3, xmm6
+ phaddw xmm0, xmm2
+ phaddw xmm1, xmm3
+ psraw xmm0, 8
+ psraw xmm1, 8
+ packsswb xmm0, xmm1
+ paddb xmm0, xmm5 // -> unsigned
+
+ // step 3 - store 8 U and 8 V values
+ movlps qword ptr [edx], xmm0 // U
+ movhps qword ptr [edx + edi], xmm0 // V
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+__declspec(naked)
+void RGBAToUVRow_SSSE3(const uint8* src_argb0, int src_stride_argb,
+ uint8* dst_u, uint8* dst_v, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov esi, [esp + 8 + 8] // src_stride_argb
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // width
+ movdqa xmm5, kAddUV128
+ movdqa xmm6, kRGBAToV
+ movdqa xmm7, kRGBAToU
+ sub edi, edx // stride from u to v
+
+ convertloop:
+ /* step 1 - subsample 16x2 argb pixels to 8x1 */
+ movdqu xmm0, [eax]
+ movdqu xmm4, [eax + esi]
+ pavgb xmm0, xmm4
+ movdqu xmm1, [eax + 16]
+ movdqu xmm4, [eax + esi + 16]
+ pavgb xmm1, xmm4
+ movdqu xmm2, [eax + 32]
+ movdqu xmm4, [eax + esi + 32]
+ pavgb xmm2, xmm4
+ movdqu xmm3, [eax + 48]
+ movdqu xmm4, [eax + esi + 48]
+ pavgb xmm3, xmm4
+
+ lea eax, [eax + 64]
+ movdqa xmm4, xmm0
+ shufps xmm0, xmm1, 0x88
+ shufps xmm4, xmm1, 0xdd
+ pavgb xmm0, xmm4
+ movdqa xmm4, xmm2
+ shufps xmm2, xmm3, 0x88
+ shufps xmm4, xmm3, 0xdd
+ pavgb xmm2, xmm4
+
+ // step 2 - convert to U and V
+ // from here down is very similar to Y code except
+ // instead of 16 different pixels, it's 8 pixels of U and 8 of V
+ movdqa xmm1, xmm0
+ movdqa xmm3, xmm2
+ pmaddubsw xmm0, xmm7 // U
+ pmaddubsw xmm2, xmm7
+ pmaddubsw xmm1, xmm6 // V
+ pmaddubsw xmm3, xmm6
+ phaddw xmm0, xmm2
+ phaddw xmm1, xmm3
+ psraw xmm0, 8
+ psraw xmm1, 8
+ packsswb xmm0, xmm1
+ paddb xmm0, xmm5 // -> unsigned
+
+ // step 3 - store 8 U and 8 V values
+ movlps qword ptr [edx], xmm0 // U
+ movhps qword ptr [edx + edi], xmm0 // V
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBTOYROW_SSSE3
+
+// Read 16 UV from 444
+#define READYUV444_AVX2 __asm { \
+ __asm vmovdqu xmm0, [esi] /* U */ /* NOLINT */ \
+ __asm vmovdqu xmm1, [esi + edi] /* V */ /* NOLINT */ \
+ __asm lea esi, [esi + 16] \
+ __asm vpermq ymm0, ymm0, 0xd8 \
+ __asm vpermq ymm1, ymm1, 0xd8 \
+ __asm vpunpcklbw ymm0, ymm0, ymm1 /* UV */ \
+ }
+
+// Read 8 UV from 422, upsample to 16 UV.
+#define READYUV422_AVX2 __asm { \
+ __asm vmovq xmm0, qword ptr [esi] /* U */ /* NOLINT */ \
+ __asm vmovq xmm1, qword ptr [esi + edi] /* V */ /* NOLINT */ \
+ __asm lea esi, [esi + 8] \
+ __asm vpunpcklbw ymm0, ymm0, ymm1 /* UV */ \
+ __asm vpermq ymm0, ymm0, 0xd8 \
+ __asm vpunpcklwd ymm0, ymm0, ymm0 /* UVUV (upsample) */ \
+ }
+
+// Read 4 UV from 411, upsample to 16 UV.
+#define READYUV411_AVX2 __asm { \
+ __asm vmovd xmm0, dword ptr [esi] /* U */ /* NOLINT */ \
+ __asm vmovd xmm1, dword ptr [esi + edi] /* V */ /* NOLINT */ \
+ __asm lea esi, [esi + 4] \
+ __asm vpunpcklbw ymm0, ymm0, ymm1 /* UV */ \
+ __asm vpunpcklwd ymm0, ymm0, ymm0 /* UVUV (upsample) */ \
+ __asm vpermq ymm0, ymm0, 0xd8 \
+ __asm vpunpckldq ymm0, ymm0, ymm0 /* UVUVUVUV (upsample) */ \
+ }
+
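+// What the three readers above amount to, as a scalar sketch (hypothetical
+// helper, illustrative only): 444 carries one UV per pixel, 422 repeats each
+// UV pair twice, and 411 repeats it four times before the shared conversion.
+static __inline void UpsampleUV_Sketch(const uint8* u, const uint8* v,
+                                       int subsample,  // 1, 2 or 4
+                                       uint8* dst_uv, int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst_uv[x * 2 + 0] = u[x / subsample];
+    dst_uv[x * 2 + 1] = v[x / subsample];
+  }
+}
+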
+// Read 8 UV from NV12, upsample to 16 UV.
+#define READNV12_AVX2 __asm { \
+ __asm vmovdqu xmm0, [esi] /* UV */ \
+ __asm lea esi, [esi + 16] \
+ __asm vpermq ymm0, ymm0, 0xd8 \
+ __asm vpunpcklwd ymm0, ymm0, ymm0 /* UVUV (upsample) */ \
+ }
+
+// Convert 16 pixels: 16 UV and 16 Y.
+#define YUVTORGB_AVX2(YuvConstants) __asm { \
+ /* Step 1: Find 8 UV contributions to 16 R,G,B values */ \
+ __asm vpmaddubsw ymm2, ymm0, YuvConstants.kUVToR /* scale R UV */ \
+ __asm vpmaddubsw ymm1, ymm0, YuvConstants.kUVToG /* scale G UV */ \
+ __asm vpmaddubsw ymm0, ymm0, YuvConstants.kUVToB /* scale B UV */ \
+ __asm vmovdqu ymm3, YuvConstants.kUVBiasR \
+ __asm vpsubw ymm2, ymm3, ymm2 \
+ __asm vmovdqu ymm3, YuvConstants.kUVBiasG \
+ __asm vpsubw ymm1, ymm3, ymm1 \
+ __asm vmovdqu ymm3, YuvConstants.kUVBiasB \
+ __asm vpsubw ymm0, ymm3, ymm0 \
+ /* Step 2: Find Y contribution to 16 R,G,B values */ \
+ __asm vmovdqu xmm3, [eax] /* NOLINT */ \
+ __asm lea eax, [eax + 16] \
+ __asm vpermq ymm3, ymm3, 0xd8 \
+ __asm vpunpcklbw ymm3, ymm3, ymm3 \
+ __asm vpmulhuw ymm3, ymm3, YuvConstants.kYToRgb \
+ __asm vpaddsw ymm0, ymm0, ymm3 /* B += Y */ \
+ __asm vpaddsw ymm1, ymm1, ymm3 /* G += Y */ \
+ __asm vpaddsw ymm2, ymm2, ymm3 /* R += Y */ \
+ __asm vpsraw ymm0, ymm0, 6 \
+ __asm vpsraw ymm1, ymm1, 6 \
+ __asm vpsraw ymm2, ymm2, 6 \
+ __asm vpackuswb ymm0, ymm0, ymm0 /* B */ \
+ __asm vpackuswb ymm1, ymm1, ymm1 /* G */ \
+ __asm vpackuswb ymm2, ymm2, ymm2 /* R */ \
+ }
+
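+// Scalar sketch of the conversion YUVTORGB_AVX2 performs (illustrative only;
+// helper names are hypothetical). The SIMD path works in 6-bit fixed point
+// with precomputed bias vectors; the widely used 8-bit BT.601 form below is
+// equivalent up to rounding.
+static __inline int Clamp255_Sketch(int v) {
+  return v < 0 ? 0 : (v > 255 ? 255 : v);
+}
+static __inline void YuvPixel_Sketch(uint8 y, uint8 u, uint8 v,
+                                     uint8* b, uint8* g, uint8* r) {
+  int luma = ((int)y - 16) * 298;
+  *b = (uint8)Clamp255_Sketch((luma + 516 * ((int)u - 128) + 128) >> 8);
+  *g = (uint8)Clamp255_Sketch(
+      (luma - 100 * ((int)u - 128) - 208 * ((int)v - 128) + 128) >> 8);
+  *r = (uint8)Clamp255_Sketch((luma + 409 * ((int)v - 128) + 128) >> 8);
+}
+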
+// Store 16 ARGB values.
+#define STOREARGB_AVX2 __asm { \
+ /* Step 3: Weave into ARGB */ \
+ __asm vpunpcklbw ymm0, ymm0, ymm1 /* BG */ \
+ __asm vpermq ymm0, ymm0, 0xd8 \
+ __asm vpunpcklbw ymm2, ymm2, ymm5 /* RA */ \
+ __asm vpermq ymm2, ymm2, 0xd8 \
+ __asm vpunpcklwd ymm1, ymm0, ymm2 /* BGRA first 8 pixels */ \
+ __asm vpunpckhwd ymm0, ymm0, ymm2 /* BGRA next 8 pixels */ \
+ __asm vmovdqu 0[edx], ymm1 \
+ __asm vmovdqu 32[edx], ymm0 \
+ __asm lea edx, [edx + 64] \
+ }
+
+#ifdef HAS_I422TOARGBROW_AVX2
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
+__declspec(naked)
+void I422ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+ STOREARGB_AVX2
+
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_I422TOARGBROW_AVX2
+
+#ifdef HAS_J422TOARGBROW_AVX2
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
+__declspec(naked)
+void J422ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvJConstants)
+ STOREARGB_AVX2
+
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_J422TOARGBROW_AVX2
+
+#ifdef HAS_I444TOARGBROW_AVX2
+// 16 pixels
+// 16 UV values with 16 Y producing 16 ARGB (64 bytes).
+__declspec(naked)
+void I444ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READYUV444_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+ STOREARGB_AVX2
+
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_I444TOARGBROW_AVX2
+
+#ifdef HAS_I411TOARGBROW_AVX2
+// 16 pixels
+// 4 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
+__declspec(naked)
+void I411ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READYUV411_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+ STOREARGB_AVX2
+
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_I411TOARGBROW_AVX2
+
+#ifdef HAS_NV12TOARGBROW_AVX2
+// 16 pixels.
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ARGB (64 bytes).
+__declspec(naked)
+void NV12ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // Y
+ mov esi, [esp + 4 + 8] // UV
+ mov edx, [esp + 4 + 12] // argb
+ mov ecx, [esp + 4 + 16] // width
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READNV12_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+ STOREARGB_AVX2
+
+ sub ecx, 16
+ jg convertloop
+
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_NV12TOARGBROW_AVX2
+
+#ifdef HAS_NV21TOARGBROW_AVX2
+// 16 pixels.
+// 8 VU values upsampled to 16 VU, mixed with 16 Y producing 16 ARGB (64 bytes).
+__declspec(naked)
+void NV21ToARGBRow_AVX2(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // Y
+ mov esi, [esp + 4 + 8] // UV
+ mov edx, [esp + 4 + 12] // argb
+ mov ecx, [esp + 4 + 16] // width
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READNV12_AVX2
+ YUVTORGB_AVX2(kYvuConstants)
+ STOREARGB_AVX2
+
+ sub ecx, 16
+ jg convertloop
+
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_NV21TOARGBROW_AVX2
+
+#ifdef HAS_I422TOBGRAROW_AVX2
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 BGRA (64 bytes).
+// TODO(fbarchard): Use macros to reduce duplicate code. See SSSE3.
+__declspec(naked)
+void I422ToBGRARow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+
+ // Step 3: Weave into BGRA
+ vpunpcklbw ymm1, ymm1, ymm0 // GB
+ vpermq ymm1, ymm1, 0xd8
+ vpunpcklbw ymm2, ymm5, ymm2 // AR
+ vpermq ymm2, ymm2, 0xd8
+ vpunpcklwd ymm0, ymm2, ymm1 // ARGB first 8 pixels
+ vpunpckhwd ymm2, ymm2, ymm1 // ARGB next 8 pixels
+ vmovdqu [edx], ymm0
+ vmovdqu [edx + 32], ymm2
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_I422TOBGRAROW_AVX2
+
+#ifdef HAS_I422TORGBAROW_AVX2
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 RGBA (64 bytes).
+// TODO(fbarchard): Use macros to reduce duplicate code. See SSSE3.
+__declspec(naked)
+void I422ToRGBARow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+
+ // Step 3: Weave into RGBA
+ vpunpcklbw ymm1, ymm1, ymm2 // GR
+ vpermq ymm1, ymm1, 0xd8
+ vpunpcklbw ymm2, ymm5, ymm0 // AB
+ vpermq ymm2, ymm2, 0xd8
+ vpunpcklwd ymm0, ymm2, ymm1 // ABGR first 8 pixels
+ vpunpckhwd ymm1, ymm2, ymm1 // ABGR next 8 pixels
+ vmovdqu [edx], ymm0
+ vmovdqu [edx + 32], ymm1
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_I422TORGBAROW_AVX2
+
+#ifdef HAS_I422TOABGRROW_AVX2
+// 16 pixels
+// 8 UV values upsampled to 16 UV, mixed with 16 Y producing 16 ABGR (64 bytes).
+// TODO(fbarchard): Use macros to reduce duplicate code. See SSSE3.
+__declspec(naked)
+void I422ToABGRRow_AVX2(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ vpcmpeqb ymm5, ymm5, ymm5 // generate 0xffffffffffffffff for alpha
+
+ convertloop:
+ READYUV422_AVX2
+ YUVTORGB_AVX2(kYuvConstants)
+
+ // Step 3: Weave into ABGR
+ vpunpcklbw ymm1, ymm2, ymm1 // RG
+ vpermq ymm1, ymm1, 0xd8
+ vpunpcklbw ymm2, ymm0, ymm5 // BA
+ vpermq ymm2, ymm2, 0xd8
+ vpunpcklwd ymm0, ymm1, ymm2 // RGBA first 8 pixels
+ vpunpckhwd ymm1, ymm1, ymm2 // RGBA next 8 pixels
+ vmovdqu [edx], ymm0
+ vmovdqu [edx + 32], ymm1
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_I422TOABGRROW_AVX2
+
+#if defined(HAS_I422TOARGBROW_SSSE3)
+// TODO(fbarchard): Read that does half size on Y and treats 420 as 444.
+
+// Read 8 UV from 444.
+#define READYUV444 __asm { \
+ __asm movq xmm0, qword ptr [esi] /* U */ /* NOLINT */ \
+ __asm movq xmm1, qword ptr [esi + edi] /* V */ /* NOLINT */ \
+ __asm lea esi, [esi + 8] \
+ __asm punpcklbw xmm0, xmm1 /* UV */ \
+ }
+
+// Read 4 UV from 422, upsample to 8 UV.
+#define READYUV422 __asm { \
+ __asm movd xmm0, [esi] /* U */ \
+ __asm movd xmm1, [esi + edi] /* V */ \
+ __asm lea esi, [esi + 4] \
+ __asm punpcklbw xmm0, xmm1 /* UV */ \
+ __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \
+ }
+
+// Read 2 UV from 411, upsample to 8 UV.
+#define READYUV411 __asm { \
+ __asm movzx ebx, word ptr [esi] /* U */ /* NOLINT */ \
+ __asm movd xmm0, ebx \
+ __asm movzx ebx, word ptr [esi + edi] /* V */ /* NOLINT */ \
+ __asm movd xmm1, ebx \
+ __asm lea esi, [esi + 2] \
+ __asm punpcklbw xmm0, xmm1 /* UV */ \
+ __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \
+ __asm punpckldq xmm0, xmm0 /* UVUVUVUV (upsample) */ \
+ }
+
+// Read 4 UV from NV12, upsample to 8 UV.
+#define READNV12 __asm { \
+ __asm movq xmm0, qword ptr [esi] /* UV */ /* NOLINT */ \
+ __asm lea esi, [esi + 8] \
+ __asm punpcklwd xmm0, xmm0 /* UVUV (upsample) */ \
+ }
+
+// Convert 8 pixels: 8 UV and 8 Y.
+#define YUVTORGB(YuvConstants) __asm { \
+ /* Step 1: Find 4 UV contributions to 8 R,G,B values */ \
+ __asm movdqa xmm1, xmm0 \
+ __asm movdqa xmm2, xmm0 \
+ __asm movdqa xmm3, xmm0 \
+ __asm movdqa xmm0, YuvConstants.kUVBiasB /* unbias back to signed */ \
+ __asm pmaddubsw xmm1, YuvConstants.kUVToB /* scale B UV */ \
+ __asm psubw xmm0, xmm1 \
+ __asm movdqa xmm1, YuvConstants.kUVBiasG \
+ __asm pmaddubsw xmm2, YuvConstants.kUVToG /* scale G UV */ \
+ __asm psubw xmm1, xmm2 \
+ __asm movdqa xmm2, YuvConstants.kUVBiasR \
+ __asm pmaddubsw xmm3, YuvConstants.kUVToR /* scale R UV */ \
+ __asm psubw xmm2, xmm3 \
+ /* Step 2: Find Y contribution to 8 R,G,B values */ \
+ __asm movq xmm3, qword ptr [eax] /* NOLINT */ \
+ __asm lea eax, [eax + 8] \
+ __asm punpcklbw xmm3, xmm3 \
+ __asm pmulhuw xmm3, YuvConstants.kYToRgb \
+ __asm paddsw xmm0, xmm3 /* B += Y */ \
+ __asm paddsw xmm1, xmm3 /* G += Y */ \
+ __asm paddsw xmm2, xmm3 /* R += Y */ \
+ __asm psraw xmm0, 6 \
+ __asm psraw xmm1, 6 \
+ __asm psraw xmm2, 6 \
+ __asm packuswb xmm0, xmm0 /* B */ \
+ __asm packuswb xmm1, xmm1 /* G */ \
+ __asm packuswb xmm2, xmm2 /* R */ \
+ }
+
+// Store 8 ARGB values.
+#define STOREARGB __asm { \
+ /* Step 3: Weave into ARGB */ \
+ __asm punpcklbw xmm0, xmm1 /* BG */ \
+ __asm punpcklbw xmm2, xmm5 /* RA */ \
+ __asm movdqa xmm1, xmm0 \
+ __asm punpcklwd xmm0, xmm2 /* BGRA first 4 pixels */ \
+ __asm punpckhwd xmm1, xmm2 /* BGRA next 4 pixels */ \
+ __asm movdqu 0[edx], xmm0 \
+ __asm movdqu 16[edx], xmm1 \
+ __asm lea edx, [edx + 32] \
+ }
+
+// Store 8 BGRA values.
+#define STOREBGRA __asm { \
+ /* Step 3: Weave into BGRA */ \
+ __asm pcmpeqb xmm5, xmm5 /* generate 0xffffffff for alpha */ \
+ __asm punpcklbw xmm1, xmm0 /* GB */ \
+ __asm punpcklbw xmm5, xmm2 /* AR */ \
+ __asm movdqa xmm0, xmm5 \
+ __asm punpcklwd xmm5, xmm1 /* BGRA first 4 pixels */ \
+ __asm punpckhwd xmm0, xmm1 /* BGRA next 4 pixels */ \
+ __asm movdqu 0[edx], xmm5 \
+ __asm movdqu 16[edx], xmm0 \
+ __asm lea edx, [edx + 32] \
+ }
+
+// Store 8 ABGR values.
+#define STOREABGR __asm { \
+ /* Step 3: Weave into ABGR */ \
+ __asm punpcklbw xmm2, xmm1 /* RG */ \
+ __asm punpcklbw xmm0, xmm5 /* BA */ \
+ __asm movdqa xmm1, xmm2 \
+ __asm punpcklwd xmm2, xmm0 /* RGBA first 4 pixels */ \
+ __asm punpckhwd xmm1, xmm0 /* RGBA next 4 pixels */ \
+ __asm movdqu 0[edx], xmm2 \
+ __asm movdqu 16[edx], xmm1 \
+ __asm lea edx, [edx + 32] \
+ }
+
+// Store 8 RGBA values.
+#define STORERGBA __asm { \
+ /* Step 3: Weave into RGBA */ \
+ __asm pcmpeqb xmm5, xmm5 /* generate 0xffffffff for alpha */ \
+ __asm punpcklbw xmm1, xmm2 /* GR */ \
+ __asm punpcklbw xmm5, xmm0 /* AB */ \
+ __asm movdqa xmm0, xmm5 \
+ __asm punpcklwd xmm5, xmm1 /* RGBA first 4 pixels */ \
+ __asm punpckhwd xmm0, xmm1 /* RGBA next 4 pixels */ \
+ __asm movdqu 0[edx], xmm5 \
+ __asm movdqu 16[edx], xmm0 \
+ __asm lea edx, [edx + 32] \
+ }
+
+// Store 8 RGB24 values.
+#define STORERGB24 __asm { \
+ /* Step 3: Weave into RRGB */ \
+ __asm punpcklbw xmm0, xmm1 /* BG */ \
+ __asm punpcklbw xmm2, xmm2 /* RR */ \
+ __asm movdqa xmm1, xmm0 \
+ __asm punpcklwd xmm0, xmm2 /* BGRR first 4 pixels */ \
+ __asm punpckhwd xmm1, xmm2 /* BGRR next 4 pixels */ \
+ /* Step 4: RRGB -> RGB24 */ \
+ __asm pshufb xmm0, xmm5 /* Pack first 8 and last 4 bytes. */ \
+ __asm pshufb xmm1, xmm6 /* Pack first 12 bytes. */ \
+ __asm palignr xmm1, xmm0, 12 /* last 4 bytes of xmm0 + 12 xmm1 */ \
+ __asm movq qword ptr 0[edx], xmm0 /* First 8 bytes */ \
+ __asm movdqu 8[edx], xmm1 /* Last 16 bytes */ \
+ __asm lea edx, [edx + 24] \
+ }
+
+// Store 8 RAW values.
+#define STORERAW __asm { \
+ /* Step 3: Weave into RRGB */ \
+ __asm punpcklbw xmm0, xmm1 /* BG */ \
+ __asm punpcklbw xmm2, xmm2 /* RR */ \
+ __asm movdqa xmm1, xmm0 \
+ __asm punpcklwd xmm0, xmm2 /* BGRR first 4 pixels */ \
+ __asm punpckhwd xmm1, xmm2 /* BGRR next 4 pixels */ \
+ /* Step 4: RRGB -> RAW */ \
+ __asm pshufb xmm0, xmm5 /* Pack first 8 and last 4 bytes. */ \
+ __asm pshufb xmm1, xmm6 /* Pack first 12 bytes. */ \
+ __asm palignr xmm1, xmm0, 12 /* last 4 bytes of xmm0 + 12 xmm1 */ \
+ __asm movq qword ptr 0[edx], xmm0 /* First 8 bytes */ \
+ __asm movdqu 8[edx], xmm1 /* Last 16 bytes */ \
+ __asm lea edx, [edx + 24] \
+ }
+
+// Store 8 RGB565 values.
+#define STORERGB565 __asm { \
+ /* Step 3: Weave into RRGB */ \
+ __asm punpcklbw xmm0, xmm1 /* BG */ \
+ __asm punpcklbw xmm2, xmm2 /* RR */ \
+ __asm movdqa xmm1, xmm0 \
+ __asm punpcklwd xmm0, xmm2 /* BGRR first 4 pixels */ \
+ __asm punpckhwd xmm1, xmm2 /* BGRR next 4 pixels */ \
+ /* Step 4: RRGB -> RGB565 */ \
+ __asm movdqa xmm3, xmm0 /* B first 4 pixels of argb */ \
+ __asm movdqa xmm2, xmm0 /* G */ \
+ __asm pslld xmm0, 8 /* R */ \
+ __asm psrld xmm3, 3 /* B */ \
+ __asm psrld xmm2, 5 /* G */ \
+ __asm psrad xmm0, 16 /* R */ \
+ __asm pand xmm3, xmm5 /* B */ \
+ __asm pand xmm2, xmm6 /* G */ \
+ __asm pand xmm0, xmm7 /* R */ \
+ __asm por xmm3, xmm2 /* BG */ \
+ __asm por xmm0, xmm3 /* BGR */ \
+ __asm movdqa xmm3, xmm1 /* B next 4 pixels of argb */ \
+ __asm movdqa xmm2, xmm1 /* G */ \
+ __asm pslld xmm1, 8 /* R */ \
+ __asm psrld xmm3, 3 /* B */ \
+ __asm psrld xmm2, 5 /* G */ \
+ __asm psrad xmm1, 16 /* R */ \
+ __asm pand xmm3, xmm5 /* B */ \
+ __asm pand xmm2, xmm6 /* G */ \
+ __asm pand xmm1, xmm7 /* R */ \
+ __asm por xmm3, xmm2 /* BG */ \
+ __asm por xmm1, xmm3 /* BGR */ \
+ __asm packssdw xmm0, xmm1 \
+ __asm movdqu 0[edx], xmm0 /* store 8 pixels of RGB565 */ \
+ __asm lea edx, [edx + 16] \
+ }
+
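+// Per pixel, the shift-and-mask sequence in STORERGB565 reduces to this
+// scalar sketch (illustrative; assumes the library's uint16 typedef):
+static __inline uint16 PackRGB565_Sketch(uint8 b, uint8 g, uint8 r) {
+  return (uint16)((b >> 3) | ((g >> 2) << 5) | ((r >> 3) << 11));
+}
+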
+// 8 pixels.
+// 8 UV values, mixed with 8 Y producing 8 ARGB (32 bytes).
+__declspec(naked)
+void I444ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha
+
+ convertloop:
+ READYUV444
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+// 8 pixels.
+// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB24 (24 bytes).
+__declspec(naked)
+void I422ToRGB24Row_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_rgb24,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // rgb24
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ movdqa xmm5, kShuffleMaskARGBToRGB24_0
+ movdqa xmm6, kShuffleMaskARGBToRGB24
+
+ convertloop:
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STORERGB24
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+// 8 pixels.
+// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RAW (24 bytes).
+__declspec(naked)
+void I422ToRAWRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_raw,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // raw
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ movdqa xmm5, kShuffleMaskARGBToRAW_0
+ movdqa xmm6, kShuffleMaskARGBToRAW
+
+ convertloop:
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STORERAW
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+// 8 pixels
+// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 RGB565 (16 bytes).
+__declspec(naked)
+void I422ToRGB565Row_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb565_buf,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // rgb565
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ pcmpeqb xmm5, xmm5 // generate mask 0x0000001f
+ psrld xmm5, 27
+ pcmpeqb xmm6, xmm6 // generate mask 0x000007e0
+ psrld xmm6, 26
+ pslld xmm6, 5
+ pcmpeqb xmm7, xmm7 // generate mask 0xfffff800
+ pslld xmm7, 11
+
+ convertloop:
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STORERGB565
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+// 8 pixels.
+// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
+__declspec(naked)
+void I422ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha
+
+ convertloop:
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+// 8 pixels.
+// JPeg color space version of I422ToARGB
+// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
+__declspec(naked)
+void J422ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // argb
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha
+
+ convertloop:
+ READYUV422
+ YUVTORGB(kYuvJConstants)
+ STOREARGB
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+// 8 pixels.
+// 2 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
+// Similar to I420 but duplicate UV once more.
+__declspec(naked)
+void I411ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push ebx
+ push esi
+ push edi
+ mov eax, [esp + 12 + 4] // Y
+ mov esi, [esp + 12 + 8] // U
+ mov edi, [esp + 12 + 12] // V
+ mov edx, [esp + 12 + 16] // argb
+ mov ecx, [esp + 12 + 20] // width
+ sub edi, esi
+ pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha
+
+ convertloop:
+ READYUV411 // modifies EBX
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ pop ebx
+ ret
+ }
+}
+
+// 8 pixels.
+// 4 UV values upsampled to 8 UV, mixed with 8 Y producing 8 ARGB (32 bytes).
+__declspec(naked)
+void NV12ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // Y
+ mov esi, [esp + 4 + 8] // UV
+ mov edx, [esp + 4 + 12] // argb
+ mov ecx, [esp + 4 + 16] // width
+ pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha
+
+ convertloop:
+ READNV12
+ YUVTORGB(kYuvConstants)
+ STOREARGB
+
+ sub ecx, 8
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+
+// 8 pixels.
+// 4 VU values upsampled to 8 VU, mixed with 8 Y producing 8 ARGB (32 bytes).
+__declspec(naked)
+void NV21ToARGBRow_SSSE3(const uint8* y_buf,
+ const uint8* uv_buf,
+ uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // Y
+ mov esi, [esp + 4 + 8] // UV
+ mov edx, [esp + 4 + 12] // argb
+ mov ecx, [esp + 4 + 16] // width
+ pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha
+
+ convertloop:
+ READNV12
+ YUVTORGB(kYvuConstants)
+ STOREARGB
+
+ sub ecx, 8
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+
+__declspec(naked)
+void I422ToBGRARow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_bgra,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // bgra
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+
+ convertloop:
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STOREBGRA
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+__declspec(naked)
+void I422ToABGRRow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_abgr,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // abgr
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+ pcmpeqb xmm5, xmm5 // generate 0xffffffff for alpha
+
+ convertloop:
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STOREABGR
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+__declspec(naked)
+void I422ToRGBARow_SSSE3(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* dst_rgba,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // Y
+ mov esi, [esp + 8 + 8] // U
+ mov edi, [esp + 8 + 12] // V
+ mov edx, [esp + 8 + 16] // rgba
+ mov ecx, [esp + 8 + 20] // width
+ sub edi, esi
+
+ convertloop:
+ READYUV422
+ YUVTORGB(kYuvConstants)
+ STORERGBA
+
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+#endif // HAS_I422TOARGBROW_SSSE3
+
+#ifdef HAS_I400TOARGBROW_SSE2
+// 8 pixels of Y converted to 8 pixels of ARGB (32 bytes).
+__declspec(naked)
+void I400ToARGBRow_SSE2(const uint8* y_buf,
+ uint8* rgb_buf,
+ int width) {
+ __asm {
+ mov eax, 0x4a354a35 // 4a35 = 18997 = round(1.164 * 64 * 256)
+ movd xmm2, eax
+ pshufd xmm2, xmm2, 0
+ mov eax, 0x04880488 // 0488 = 1160 = round(1.164 * 64 * 16)
+ movd xmm3, eax
+ pshufd xmm3, xmm3, 0
+ pcmpeqb xmm4, xmm4 // generate mask 0xff000000
+ pslld xmm4, 24
+
+ mov eax, [esp + 4] // Y
+ mov edx, [esp + 8] // rgb
+ mov ecx, [esp + 12] // width
+
+ convertloop:
+ // Step 1: Scale Y contribution to 8 G values. G = (y - 16) * 1.164
+ movq xmm0, qword ptr [eax]
+ lea eax, [eax + 8]
+ punpcklbw xmm0, xmm0 // Y.Y
+ pmulhuw xmm0, xmm2
+ psubusw xmm0, xmm3
+ psrlw xmm0, 6
+ packuswb xmm0, xmm0 // G
+
+ // Step 2: Weave into ARGB
+ punpcklbw xmm0, xmm0 // GG
+ movdqa xmm1, xmm0
+ punpcklwd xmm0, xmm0 // BGRA first 4 pixels
+ punpckhwd xmm1, xmm1 // BGRA next 4 pixels
+ por xmm0, xmm4
+ por xmm1, xmm4
+ movdqu [edx], xmm0
+ movdqu [edx + 16], xmm1
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_I400TOARGBROW_SSE2
+
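+// Scalar sketch of the fixed-point math above (illustrative only): the
+// punpcklbw self-unpack widens Y to y*0x0101, pmulhuw scales it so the result
+// approximates (y - 16) * 1.164 in 10.6 fixed point, psubusw debiases with
+// saturation at zero, and the shift plus pack narrows and clamps.
+static __inline uint8 I400ToGray_Sketch(uint8 y) {
+  int t = ((y * 0x0101) * 18997) >> 16;  // ~ y * 1.164, scaled by 64
+  t = t < 1160 ? 0 : t - 1160;           // psubusw: remove the 16 luma bias
+  t >>= 6;
+  return (uint8)(t > 255 ? 255 : t);     // packuswb clamp
+}
+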
+#ifdef HAS_I400TOARGBROW_AVX2
+// 16 pixels of Y converted to 16 pixels of ARGB (64 bytes).
+// note: vpunpcklbw mutates and vpackuswb unmutates.
+__declspec(naked)
+void I400ToARGBRow_AVX2(const uint8* y_buf,
+ uint8* rgb_buf,
+ int width) {
+ __asm {
+ mov eax, 0x4a354a35 // 4a35 = 18997 = round(1.164 * 64 * 256)
+ vmovd xmm2, eax
+ vbroadcastss ymm2, xmm2
+ mov eax, 0x04880488 // 0488 = 1160 = round(1.164 * 64 * 16)
+ vmovd xmm3, eax
+ vbroadcastss ymm3, xmm3
+ vpcmpeqb ymm4, ymm4, ymm4 // generate mask 0xff000000
+ vpslld ymm4, ymm4, 24
+
+ mov eax, [esp + 4] // Y
+ mov edx, [esp + 8] // rgb
+ mov ecx, [esp + 12] // width
+
+ convertloop:
+ // Step 1: Scale Y contribution to 16 G values. G = (y - 16) * 1.164
+ vmovdqu xmm0, [eax]
+ lea eax, [eax + 16]
+ vpermq ymm0, ymm0, 0xd8 // vpunpcklbw mutates
+ vpunpcklbw ymm0, ymm0, ymm0 // Y.Y
+ vpmulhuw ymm0, ymm0, ymm2
+ vpsubusw ymm0, ymm0, ymm3
+ vpsrlw ymm0, ymm0, 6
+ vpackuswb ymm0, ymm0, ymm0 // G. still mutated: 3120
+
+ // TODO(fbarchard): Weave alpha with unpack.
+ // Step 2: Weave into ARGB
+ vpunpcklbw ymm1, ymm0, ymm0 // GG - mutates
+ vpermq ymm1, ymm1, 0xd8
+ vpunpcklwd ymm0, ymm1, ymm1 // GGGG first 8 pixels
+ vpunpckhwd ymm1, ymm1, ymm1 // GGGG next 8 pixels
+ vpor ymm0, ymm0, ymm4
+ vpor ymm1, ymm1, ymm4
+ vmovdqu [edx], ymm0
+ vmovdqu [edx + 32], ymm1
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_I400TOARGBROW_AVX2
+
+#ifdef HAS_MIRRORROW_SSSE3
+// Shuffle table for reversing the bytes.
+static const uvec8 kShuffleMirror = {
+ 15u, 14u, 13u, 12u, 11u, 10u, 9u, 8u, 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
+};
+
+// TODO(fbarchard): Replace lea with -16 offset.
+__declspec(naked)
+void MirrorRow_SSSE3(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+ movdqa xmm5, kShuffleMirror
+
+ convertloop:
+ movdqu xmm0, [eax - 16 + ecx]
+ pshufb xmm0, xmm5
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_MIRRORROW_SSSE3
+
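+// All three mirror variants (SSSE3 above, AVX2 and SSE2 below) implement the
+// same trivial loop, reading from the tail of the row; as a scalar sketch:
+static __inline void MirrorRow_Sketch(const uint8* src, uint8* dst,
+                                      int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst[x] = src[width - 1 - x];
+  }
+}
+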
+#ifdef HAS_MIRRORROW_AVX2
+__declspec(naked)
+void MirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+ vbroadcastf128 ymm5, kShuffleMirror
+
+ convertloop:
+ vmovdqu ymm0, [eax - 32 + ecx]
+ vpshufb ymm0, ymm0, ymm5
+ vpermq ymm0, ymm0, 0x4e // swap high and low halves
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_MIRRORROW_AVX2
+
+#ifdef HAS_MIRRORROW_SSE2
+__declspec(naked)
+void MirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+
+ convertloop:
+ movdqu xmm0, [eax - 16 + ecx]
+ movdqa xmm1, xmm0 // swap bytes
+ psllw xmm0, 8
+ psrlw xmm1, 8
+ por xmm0, xmm1
+ pshuflw xmm0, xmm0, 0x1b // swap words
+ pshufhw xmm0, xmm0, 0x1b
+ pshufd xmm0, xmm0, 0x4e // swap qwords
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_MIRRORROW_SSE2
+
+#ifdef HAS_MIRRORROW_UV_SSSE3
+// Shuffle table for reversing the bytes of UV channels.
+static const uvec8 kShuffleMirrorUV = {
+ 14u, 12u, 10u, 8u, 6u, 4u, 2u, 0u, 15u, 13u, 11u, 9u, 7u, 5u, 3u, 1u
+};
+
+__declspec(naked)
+void MirrorUVRow_SSSE3(const uint8* src, uint8* dst_u, uint8* dst_v,
+ int width) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // width
+ movdqa xmm1, kShuffleMirrorUV
+ lea eax, [eax + ecx * 2 - 16]
+ sub edi, edx
+
+ convertloop:
+ movdqu xmm0, [eax]
+ lea eax, [eax - 16]
+ pshufb xmm0, xmm1
+ movlpd qword ptr [edx], xmm0
+ movhpd qword ptr [edx + edi], xmm0
+ lea edx, [edx + 8]
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ ret
+ }
+}
+#endif // HAS_MIRRORROW_UV_SSSE3
+
+#ifdef HAS_ARGBMIRRORROW_SSE2
+__declspec(naked)
+void ARGBMirrorRow_SSE2(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+ lea eax, [eax - 16 + ecx * 4] // last 4 pixels.
+
+ convertloop:
+ movdqu xmm0, [eax]
+ lea eax, [eax - 16]
+ pshufd xmm0, xmm0, 0x1b
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_ARGBMIRRORROW_SSE2
+
+#ifdef HAS_ARGBMIRRORROW_AVX2
+// Shuffle table for reversing the dwords (whole ARGB pixels).
+static const ulvec32 kARGBShuffleMirror_AVX2 = {
+ 7u, 6u, 5u, 4u, 3u, 2u, 1u, 0u
+};
+
+__declspec(naked)
+void ARGBMirrorRow_AVX2(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+ vmovdqu ymm5, kARGBShuffleMirror_AVX2
+
+ convertloop:
+ vpermd ymm0, ymm5, [eax - 32 + ecx * 4] // permute dword order
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBMIRRORROW_AVX2
+
+#ifdef HAS_SPLITUVROW_SSE2
+__declspec(naked)
+void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_uv
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
+ psrlw xmm5, 8
+ sub edi, edx
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ movdqa xmm2, xmm0
+ movdqa xmm3, xmm1
+ pand xmm0, xmm5 // even bytes
+ pand xmm1, xmm5
+ packuswb xmm0, xmm1
+ psrlw xmm2, 8 // odd bytes
+ psrlw xmm3, 8
+ packuswb xmm2, xmm3
+ movdqu [edx], xmm0
+ movdqu [edx + edi], xmm2
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ ret
+ }
+}
+
+#endif // HAS_SPLITUVROW_SSE2
+
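+// Scalar sketch of SplitUVRow (illustrative only): deinterleave packed UV
+// bytes into separate U and V planes.
+static __inline void SplitUVRow_Sketch(const uint8* src_uv, uint8* dst_u,
+                                       uint8* dst_v, int pix) {
+  int x;
+  for (x = 0; x < pix; ++x) {
+    dst_u[x] = src_uv[x * 2 + 0];
+    dst_v[x] = src_uv[x * 2 + 1];
+  }
+}
+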
+#ifdef HAS_SPLITUVROW_AVX2
+__declspec(naked)
+void SplitUVRow_AVX2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_uv
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // pix
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff
+ vpsrlw ymm5, ymm5, 8
+ sub edi, edx
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vpsrlw ymm2, ymm0, 8 // odd bytes
+ vpsrlw ymm3, ymm1, 8
+ vpand ymm0, ymm0, ymm5 // even bytes
+ vpand ymm1, ymm1, ymm5
+ vpackuswb ymm0, ymm0, ymm1
+ vpackuswb ymm2, ymm2, ymm3
+ vpermq ymm0, ymm0, 0xd8
+ vpermq ymm2, ymm2, 0xd8
+ vmovdqu [edx], ymm0
+ vmovdqu [edx + edi], ymm2
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg convertloop
+
+ pop edi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_SPLITUVROW_AVX2
+
+#ifdef HAS_MERGEUVROW_SSE2
+__declspec(naked)
+void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_u
+ mov edx, [esp + 4 + 8] // src_v
+ mov edi, [esp + 4 + 12] // dst_uv
+ mov ecx, [esp + 4 + 16] // width
+ sub edx, eax
+
+ convertloop:
+ movdqu xmm0, [eax] // read 16 U's
+ movdqu xmm1, [eax + edx] // and 16 V's
+ lea eax, [eax + 16]
+ movdqa xmm2, xmm0
+ punpcklbw xmm0, xmm1 // first 8 UV pairs
+ punpckhbw xmm2, xmm1 // next 8 UV pairs
+ movdqu [edi], xmm0
+ movdqu [edi + 16], xmm2
+ lea edi, [edi + 32]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ ret
+ }
+}
+#endif // HAS_MERGEUVROW_SSE2
+
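+// The inverse of SplitUV, as a scalar sketch (illustrative only): interleave
+// the U and V planes back into packed UV pairs.
+static __inline void MergeUVRow_Sketch(const uint8* src_u, const uint8* src_v,
+                                       uint8* dst_uv, int width) {
+  int x;
+  for (x = 0; x < width; ++x) {
+    dst_uv[x * 2 + 0] = src_u[x];
+    dst_uv[x * 2 + 1] = src_v[x];
+  }
+}
+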
+#ifdef HAS_MERGEUVROW_AVX2
+__declspec(naked)
+void MergeUVRow_AVX2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+ int width) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_u
+ mov edx, [esp + 4 + 8] // src_v
+ mov edi, [esp + 4 + 12] // dst_uv
+ mov ecx, [esp + 4 + 16] // width
+ sub edx, eax
+
+ convertloop:
+ vmovdqu ymm0, [eax] // read 32 U's
+ vmovdqu ymm1, [eax + edx] // and 32 V's
+ lea eax, [eax + 32]
+ vpunpcklbw ymm2, ymm0, ymm1 // low 16 UV pairs. mutated qqword 0,2
+ vpunpckhbw ymm0, ymm0, ymm1 // high 16 UV pairs. mutated qqword 1,3
+ vextractf128 [edi], ymm2, 0 // bytes 0..15
+ vextractf128 [edi + 16], ymm0, 0 // bytes 16..31
+ vextractf128 [edi + 32], ymm2, 1 // bytes 32..47
+ vextractf128 [edi + 48], ymm0, 1 // bytes 48..63
+ lea edi, [edi + 64]
+ sub ecx, 32
+ jg convertloop
+
+ pop edi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_MERGEUVROW_AVX2
+
+#ifdef HAS_COPYROW_SSE2
+// CopyRow copies 'count' bytes using 16 byte loads/stores, 32 bytes at a time.
+__declspec(naked)
+void CopyRow_SSE2(const uint8* src, uint8* dst, int count) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // count
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ movdqu [edx], xmm0
+ movdqu [edx + 16], xmm1
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_COPYROW_SSE2
+
+#ifdef HAS_COPYROW_AVX
+// CopyRow copies 'count' bytes using 32 byte loads/stores, 64 bytes at a time.
+__declspec(naked)
+void CopyRow_AVX(const uint8* src, uint8* dst, int count) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // count
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vmovdqu [edx], ymm0
+ vmovdqu [edx + 32], ymm1
+ lea edx, [edx + 64]
+ sub ecx, 64
+ jg convertloop
+
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_COPYROW_AVX
+
+// Multiple of 1 - rep movsb handles any byte count.
+__declspec(naked)
+void CopyRow_ERMS(const uint8* src, uint8* dst, int count) {
+ __asm {
+ mov eax, esi
+ mov edx, edi
+ mov esi, [esp + 4] // src
+ mov edi, [esp + 8] // dst
+ mov ecx, [esp + 12] // count
+ rep movsb
+ mov edi, edx
+ mov esi, eax
+ ret
+ }
+}
+
+#ifdef HAS_ARGBCOPYALPHAROW_SSE2
+// width in pixels
+__declspec(naked)
+void ARGBCopyAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+ pcmpeqb xmm0, xmm0 // generate mask 0xff000000
+ pslld xmm0, 24
+ pcmpeqb xmm1, xmm1 // generate mask 0x00ffffff
+ psrld xmm1, 8
+
+ convertloop:
+ movdqu xmm2, [eax]
+ movdqu xmm3, [eax + 16]
+ lea eax, [eax + 32]
+ movdqu xmm4, [edx]
+ movdqu xmm5, [edx + 16]
+ pand xmm2, xmm0
+ pand xmm3, xmm0
+ pand xmm4, xmm1
+ pand xmm5, xmm1
+ por xmm2, xmm4
+ por xmm3, xmm5
+ movdqu [edx], xmm2
+ movdqu [edx + 16], xmm3
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+
+ ret
+ }
+}
+#endif // HAS_ARGBCOPYALPHAROW_SSE2
+
+#ifdef HAS_ARGBCOPYALPHAROW_AVX2
+// width in pixels
+__declspec(naked)
+void ARGBCopyAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+ vpcmpeqb ymm0, ymm0, ymm0
+ vpsrld ymm0, ymm0, 8 // generate mask 0x00ffffff
+
+ convertloop:
+ vmovdqu ymm1, [eax]
+ vmovdqu ymm2, [eax + 32]
+ lea eax, [eax + 64]
+ vpblendvb ymm1, ymm1, [edx], ymm0
+ vpblendvb ymm2, ymm2, [edx + 32], ymm0
+ vmovdqu [edx], ymm1
+ vmovdqu [edx + 32], ymm2
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBCOPYALPHAROW_AVX2
+
+#ifdef HAS_ARGBCOPYYTOALPHAROW_SSE2
+// width in pixels
+__declspec(naked)
+void ARGBCopyYToAlphaRow_SSE2(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+ pcmpeqb xmm0, xmm0 // generate mask 0xff000000
+ pslld xmm0, 24
+ pcmpeqb xmm1, xmm1 // generate mask 0x00ffffff
+ psrld xmm1, 8
+
+ convertloop:
+ movq xmm2, qword ptr [eax] // 8 Y's
+ lea eax, [eax + 8]
+ punpcklbw xmm2, xmm2
+ punpckhwd xmm3, xmm2
+ punpcklwd xmm2, xmm2
+ movdqu xmm4, [edx]
+ movdqu xmm5, [edx + 16]
+ pand xmm2, xmm0
+ pand xmm3, xmm0
+ pand xmm4, xmm1
+ pand xmm5, xmm1
+ por xmm2, xmm4
+ por xmm3, xmm5
+ movdqu [edx], xmm2
+ movdqu [edx + 16], xmm3
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+
+ ret
+ }
+}
+#endif // HAS_ARGBCOPYYTOALPHAROW_SSE2
+
+#ifdef HAS_ARGBCOPYYTOALPHAROW_AVX2
+// width in pixels
+__declspec(naked)
+void ARGBCopyYToAlphaRow_AVX2(const uint8* src, uint8* dst, int width) {
+ __asm {
+ mov eax, [esp + 4] // src
+ mov edx, [esp + 8] // dst
+ mov ecx, [esp + 12] // width
+ vpcmpeqb ymm0, ymm0, ymm0
+ vpsrld ymm0, ymm0, 8 // generate mask 0x00ffffff
+
+ convertloop:
+ vpmovzxbd ymm1, qword ptr [eax]
+ vpmovzxbd ymm2, qword ptr [eax + 8]
+ lea eax, [eax + 16]
+ vpslld ymm1, ymm1, 24
+ vpslld ymm2, ymm2, 24
+ vpblendvb ymm1, ymm1, [edx], ymm0
+ vpblendvb ymm2, ymm2, [edx + 32], ymm0
+ vmovdqu [edx], ymm1
+ vmovdqu [edx + 32], ymm2
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBCOPYYTOALPHAROW_AVX2
+
+#ifdef HAS_SETROW_X86
+// Write 'count' bytes using an 8 bit value repeated.
+// Count should be a multiple of 4.
+__declspec(naked)
+void SetRow_X86(uint8* dst, uint8 v8, int count) {
+ __asm {
+ movzx eax, byte ptr [esp + 8] // v8
+ mov edx, 0x01010101 // Duplicate byte to all bytes.
+ mul edx // overwrites edx with upper part of result.
+ mov edx, edi
+ mov edi, [esp + 4] // dst
+ mov ecx, [esp + 12] // count
+ shr ecx, 2
+ rep stosd
+ mov edi, edx
+ ret
+ }
+}
+
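+// The mul above is a byte-broadcast trick: v8 * 0x01010101 replicates the
+// byte into all four lanes of a dword, which rep stosd then stores count/4
+// times. Scalar sketch (illustrative only):
+static __inline void SetRow_Sketch(uint8* dst, uint8 v8, int count) {
+  uint32 v32 = (uint32)v8 * 0x01010101u;  // duplicate byte to all 4 lanes
+  uint32* d = (uint32*)dst;
+  int i;
+  for (i = 0; i < count / 4; ++i) {
+    d[i] = v32;
+  }
+}
+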
+// Write 'count' bytes using an 8 bit value repeated.
+__declspec(naked)
+void SetRow_ERMS(uint8* dst, uint8 v8, int count) {
+ __asm {
+ mov edx, edi
+ mov edi, [esp + 4] // dst
+ mov eax, [esp + 8] // v8
+ mov ecx, [esp + 12] // count
+ rep stosb
+ mov edi, edx
+ ret
+ }
+}
+
+// Write 'count' 32 bit values.
+__declspec(naked)
+void ARGBSetRow_X86(uint8* dst_argb, uint32 v32, int count) {
+ __asm {
+ mov edx, edi
+ mov edi, [esp + 4] // dst
+ mov eax, [esp + 8] // v32
+ mov ecx, [esp + 12] // count
+ rep stosd
+ mov edi, edx
+ ret
+ }
+}
+#endif // HAS_SETROW_X86
+
+#ifdef HAS_YUY2TOYROW_AVX2
+__declspec(naked)
+void YUY2ToYRow_AVX2(const uint8* src_yuy2,
+ uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_yuy2
+ mov edx, [esp + 8] // dst_y
+ mov ecx, [esp + 12] // pix
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff
+ vpsrlw ymm5, ymm5, 8
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vpand ymm0, ymm0, ymm5 // even bytes are Y
+ vpand ymm1, ymm1, ymm5
+ vpackuswb ymm0, ymm0, ymm1 // mutates.
+ vpermq ymm0, ymm0, 0xd8
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+
+__declspec(naked)
+void YUY2ToUVRow_AVX2(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_yuy2
+ mov esi, [esp + 8 + 8] // stride_yuy2
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // pix
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff
+ vpsrlw ymm5, ymm5, 8
+ sub edi, edx
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ vpavgb ymm0, ymm0, [eax + esi]
+ vpavgb ymm1, ymm1, [eax + esi + 32]
+ lea eax, [eax + 64]
+ vpsrlw ymm0, ymm0, 8 // YUYV -> UVUV
+ vpsrlw ymm1, ymm1, 8
+ vpackuswb ymm0, ymm0, ymm1 // mutates.
+ vpermq ymm0, ymm0, 0xd8
+ vpand ymm1, ymm0, ymm5 // U
+ vpsrlw ymm0, ymm0, 8 // V
+ vpackuswb ymm1, ymm1, ymm1 // mutates.
+ vpackuswb ymm0, ymm0, ymm0 // mutates.
+ vpermq ymm1, ymm1, 0xd8
+ vpermq ymm0, ymm0, 0xd8
+ vextractf128 [edx], ymm1, 0 // U
+ vextractf128 [edx + edi], ymm0, 0 // V
+ lea edx, [edx + 16]
+ sub ecx, 32
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+
+__declspec(naked)
+void YUY2ToUV422Row_AVX2(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_yuy2
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // pix
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff
+ vpsrlw ymm5, ymm5, 8
+ sub edi, edx
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vpsrlw ymm0, ymm0, 8 // YUYV -> UVUV
+ vpsrlw ymm1, ymm1, 8
+ vpackuswb ymm0, ymm0, ymm1 // mutates.
+ vpermq ymm0, ymm0, 0xd8
+ vpand ymm1, ymm0, ymm5 // U
+ vpsrlw ymm0, ymm0, 8 // V
+ vpackuswb ymm1, ymm1, ymm1 // mutates.
+ vpackuswb ymm0, ymm0, ymm0 // mutates.
+ vpermq ymm1, ymm1, 0xd8
+ vpermq ymm0, ymm0, 0xd8
+ vextractf128 [edx], ymm1, 0 // U
+ vextractf128 [edx + edi], ymm0, 0 // V
+ lea edx, [edx + 16]
+ sub ecx, 32
+ jg convertloop
+
+ pop edi
+ vzeroupper
+ ret
+ }
+}
+
+__declspec(naked)
+void UYVYToYRow_AVX2(const uint8* src_uyvy,
+ uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_uyvy
+ mov edx, [esp + 8] // dst_y
+ mov ecx, [esp + 12] // pix
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vpsrlw ymm0, ymm0, 8 // odd bytes are Y
+ vpsrlw ymm1, ymm1, 8
+ vpackuswb ymm0, ymm0, ymm1 // mutates.
+ vpermq ymm0, ymm0, 0xd8
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+
+__declspec(naked)
+void UYVYToUVRow_AVX2(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_uyvy
+ mov esi, [esp + 8 + 8] // stride_uyvy
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // pix
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff
+ vpsrlw ymm5, ymm5, 8
+ sub edi, edx
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ vpavgb ymm0, ymm0, [eax + esi]
+ vpavgb ymm1, ymm1, [eax + esi + 32]
+ lea eax, [eax + 64]
+ vpand ymm0, ymm0, ymm5 // UYVY -> UVUV
+ vpand ymm1, ymm1, ymm5
+ vpackuswb ymm0, ymm0, ymm1 // mutates.
+ vpermq ymm0, ymm0, 0xd8
+ vpand ymm1, ymm0, ymm5 // U
+ vpsrlw ymm0, ymm0, 8 // V
+ vpackuswb ymm1, ymm1, ymm1 // mutates.
+ vpackuswb ymm0, ymm0, ymm0 // mutates.
+ vpermq ymm1, ymm1, 0xd8
+ vpermq ymm0, ymm0, 0xd8
+ vextractf128 [edx], ymm1, 0 // U
+ vextractf128 [edx + edi], ymm0, 0 // V
+ lea edx, [edx + 16]
+ sub ecx, 32
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+
+__declspec(naked)
+void UYVYToUV422Row_AVX2(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_uyvy
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // pix
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff00ff
+ vpsrlw ymm5, ymm5, 8
+ sub edi, edx
+
+ convertloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vpand ymm0, ymm0, ymm5 // UYVY -> UVUV
+ vpand ymm1, ymm1, ymm5
+ vpackuswb ymm0, ymm0, ymm1 // mutates.
+ vpermq ymm0, ymm0, 0xd8
+ vpand ymm1, ymm0, ymm5 // U
+ vpsrlw ymm0, ymm0, 8 // V
+ vpackuswb ymm1, ymm1, ymm1 // mutates.
+ vpackuswb ymm0, ymm0, ymm0 // mutates.
+ vpermq ymm1, ymm1, 0xd8
+ vpermq ymm0, ymm0, 0xd8
+ vextractf128 [edx], ymm1, 0 // U
+ vextractf128 [edx + edi], ymm0, 0 // V
+ lea edx, [edx + 16]
+ sub ecx, 32
+ jg convertloop
+
+ pop edi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_YUY2TOYROW_AVX2
+
+#ifdef HAS_YUY2TOYROW_SSE2
+__declspec(naked)
+void YUY2ToYRow_SSE2(const uint8* src_yuy2,
+ uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_yuy2
+ mov edx, [esp + 8] // dst_y
+ mov ecx, [esp + 12] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
+ psrlw xmm5, 8
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ pand xmm0, xmm5 // even bytes are Y
+ pand xmm1, xmm5
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void YUY2ToUVRow_SSE2(const uint8* src_yuy2, int stride_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_yuy2
+ mov esi, [esp + 8 + 8] // stride_yuy2
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
+ psrlw xmm5, 8
+ sub edi, edx
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + esi]
+ movdqu xmm3, [eax + esi + 16]
+ lea eax, [eax + 32]
+ pavgb xmm0, xmm2
+ pavgb xmm1, xmm3
+ psrlw xmm0, 8 // YUYV -> UVUV
+ psrlw xmm1, 8
+ packuswb xmm0, xmm1
+ movdqa xmm1, xmm0
+ pand xmm0, xmm5 // U
+ packuswb xmm0, xmm0
+ psrlw xmm1, 8 // V
+ packuswb xmm1, xmm1
+ movq qword ptr [edx], xmm0
+ movq qword ptr [edx + edi], xmm1
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+__declspec(naked)
+void YUY2ToUV422Row_SSE2(const uint8* src_yuy2,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_yuy2
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
+ psrlw xmm5, 8
+ sub edi, edx
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ psrlw xmm0, 8 // YUYV -> UVUV
+ psrlw xmm1, 8
+ packuswb xmm0, xmm1
+ movdqa xmm1, xmm0
+ pand xmm0, xmm5 // U
+ packuswb xmm0, xmm0
+ psrlw xmm1, 8 // V
+ packuswb xmm1, xmm1
+ movq qword ptr [edx], xmm0
+ movq qword ptr [edx + edi], xmm1
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ ret
+ }
+}
+
+__declspec(naked)
+void UYVYToYRow_SSE2(const uint8* src_uyvy,
+ uint8* dst_y, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_uyvy
+ mov edx, [esp + 8] // dst_y
+ mov ecx, [esp + 12] // pix
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ psrlw xmm0, 8 // odd bytes are Y
+ psrlw xmm1, 8
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+ ret
+ }
+}
+
+__declspec(naked)
+void UYVYToUVRow_SSE2(const uint8* src_uyvy, int stride_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_uyvy
+ mov esi, [esp + 8 + 8] // stride_uyvy
+ mov edx, [esp + 8 + 12] // dst_u
+ mov edi, [esp + 8 + 16] // dst_v
+ mov ecx, [esp + 8 + 20] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
+ psrlw xmm5, 8
+ sub edi, edx
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + esi]
+ movdqu xmm3, [eax + esi + 16]
+ lea eax, [eax + 32]
+ pavgb xmm0, xmm2
+ pavgb xmm1, xmm3
+ pand xmm0, xmm5 // UYVY -> UVUV
+ pand xmm1, xmm5
+ packuswb xmm0, xmm1
+ movdqa xmm1, xmm0
+ pand xmm0, xmm5 // U
+ packuswb xmm0, xmm0
+ psrlw xmm1, 8 // V
+ packuswb xmm1, xmm1
+ movq qword ptr [edx], xmm0
+ movq qword ptr [edx + edi], xmm1
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+__declspec(naked)
+void UYVYToUV422Row_SSE2(const uint8* src_uyvy,
+ uint8* dst_u, uint8* dst_v, int pix) {
+ __asm {
+ push edi
+ mov eax, [esp + 4 + 4] // src_uyvy
+ mov edx, [esp + 4 + 8] // dst_u
+ mov edi, [esp + 4 + 12] // dst_v
+ mov ecx, [esp + 4 + 16] // pix
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
+ psrlw xmm5, 8
+ sub edi, edx
+
+ convertloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ pand xmm0, xmm5 // UYVY -> UVUV
+ pand xmm1, xmm5
+ packuswb xmm0, xmm1
+ movdqa xmm1, xmm0
+ pand xmm0, xmm5 // U
+ packuswb xmm0, xmm0
+ psrlw xmm1, 8 // V
+ packuswb xmm1, xmm1
+ movq qword ptr [edx], xmm0
+ movq qword ptr [edx + edi], xmm1
+ lea edx, [edx + 8]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ ret
+ }
+}
+#endif // HAS_YUY2TOYROW_SSE2
+
+#ifdef HAS_ARGBBLENDROW_SSE2
+// Blend 4 pixels at a time.
+__declspec(naked)
+void ARGBBlendRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb0
+ mov esi, [esp + 4 + 8] // src_argb1
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+ pcmpeqb xmm7, xmm7 // generate constant 0x0001
+ psrlw xmm7, 15
+ pcmpeqb xmm6, xmm6 // generate mask 0x00ff00ff
+ psrlw xmm6, 8
+ pcmpeqb xmm5, xmm5 // generate mask 0xff00ff00
+ psllw xmm5, 8
+ pcmpeqb xmm4, xmm4 // generate mask 0xff000000
+ pslld xmm4, 24
+ sub ecx, 4
+ jl convertloop4b // less than 4 pixels?
+
+ // 4 pixel loop.
+ convertloop4:
+ movdqu xmm3, [eax] // src argb
+ lea eax, [eax + 16]
+ movdqa xmm0, xmm3 // src argb
+ pxor xmm3, xmm4 // ~alpha
+ movdqu xmm2, [esi] // _r_b
+ psrlw xmm3, 8 // alpha
+ pshufhw xmm3, xmm3, 0F5h // 8 alpha words
+ pshuflw xmm3, xmm3, 0F5h
+ pand xmm2, xmm6 // _r_b
+ paddw xmm3, xmm7 // 256 - alpha
+ pmullw xmm2, xmm3 // _r_b * alpha
+ movdqu xmm1, [esi] // _a_g
+ lea esi, [esi + 16]
+ psrlw xmm1, 8 // _a_g
+ por xmm0, xmm4 // set alpha to 255
+ pmullw xmm1, xmm3 // _a_g * alpha
+ psrlw xmm2, 8 // _r_b convert to 8 bits again
+ paddusb xmm0, xmm2 // + src argb
+ pand xmm1, xmm5 // a_g_ convert to 8 bits again
+ paddusb xmm0, xmm1 // + src argb
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jge convertloop4
+
+ convertloop4b:
+ add ecx, 4 - 1
+ jl convertloop1b
+
+ // 1 pixel loop.
+ convertloop1:
+ movd xmm3, [eax] // src argb
+ lea eax, [eax + 4]
+ movdqa xmm0, xmm3 // src argb
+ pxor xmm3, xmm4 // ~alpha
+ movd xmm2, [esi] // _r_b
+ psrlw xmm3, 8 // alpha
+ pshufhw xmm3, xmm3, 0F5h // 8 alpha words
+ pshuflw xmm3, xmm3, 0F5h
+ pand xmm2, xmm6 // _r_b
+ paddw xmm3, xmm7 // 256 - alpha
+ pmullw xmm2, xmm3 // _r_b * alpha
+ movd xmm1, [esi] // _a_g
+ lea esi, [esi + 4]
+ psrlw xmm1, 8 // _a_g
+ por xmm0, xmm4 // set alpha to 255
+ pmullw xmm1, xmm3 // _a_g * alpha
+ psrlw xmm2, 8 // _r_b convert to 8 bits again
+ paddusb xmm0, xmm2 // + src argb
+ pand xmm1, xmm5 // a_g_ convert to 8 bits again
+ paddusb xmm0, xmm1 // + src argb
+ movd [edx], xmm0
+ lea edx, [edx + 4]
+ sub ecx, 1
+ jge convertloop1
+
+ convertloop1b:
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBBLENDROW_SSE2
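+
+// Editor's note: a scalar reference sketch of the blend math above, added
+// for clarity; it is not part of upstream libyuv and the helper names are
+// hypothetical. Types (uint8, uint32) are the typedefs this file already
+// uses. dst = fg + ((bg * (256 - fg.alpha)) >> 8), saturated, alpha = 255.
+static __inline uint8 BlendChannel_Ref(uint32 f, uint32 b, uint32 na) {
+  uint32 v = f + ((b * na) >> 8);      // matches pmullw/psrlw then paddusb
+  return (uint8)(v > 255 ? 255 : v);   // paddusb saturates the same way
+}
+static void ARGBBlendPixel_Ref(const uint8* fg, const uint8* bg, uint8* dst) {
+  uint32 na = 256 - fg[3];             // pxor/psrlw/paddw sequence: 256 - alpha
+  dst[0] = BlendChannel_Ref(fg[0], bg[0], na);  // B
+  dst[1] = BlendChannel_Ref(fg[1], bg[1], na);  // G
+  dst[2] = BlendChannel_Ref(fg[2], bg[2], na);  // R
+  dst[3] = 255u;                       // por xmm0, xmm4 sets alpha to 255
+}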
+
+#ifdef HAS_ARGBBLENDROW_SSSE3
+// Shuffle table for isolating alpha.
+static const uvec8 kShuffleAlpha = {
+ 3u, 0x80, 3u, 0x80, 7u, 0x80, 7u, 0x80,
+ 11u, 0x80, 11u, 0x80, 15u, 0x80, 15u, 0x80
+};
+// Same as SSE2, but replaces:
+// psrlw xmm3, 8 // alpha
+// pshufhw xmm3, xmm3, 0F5h // 8 alpha words
+// pshuflw xmm3, xmm3, 0F5h
+// with:
+// pshufb xmm3, kShuffleAlpha // alpha
+// Blend 4 pixels at a time.
+
+__declspec(naked)
+void ARGBBlendRow_SSSE3(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb0
+ mov esi, [esp + 4 + 8] // src_argb1
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+ pcmpeqb xmm7, xmm7 // generate constant 0x0001
+ psrlw xmm7, 15
+ pcmpeqb xmm6, xmm6 // generate mask 0x00ff00ff
+ psrlw xmm6, 8
+ pcmpeqb xmm5, xmm5 // generate mask 0xff00ff00
+ psllw xmm5, 8
+ pcmpeqb xmm4, xmm4 // generate mask 0xff000000
+ pslld xmm4, 24
+ sub ecx, 4
+ jl convertloop4b // less than 4 pixels?
+
+ // 4 pixel loop.
+ convertloop4:
+ movdqu xmm3, [eax] // src argb
+ lea eax, [eax + 16]
+ movdqa xmm0, xmm3 // src argb
+ pxor xmm3, xmm4 // ~alpha
+ movdqu xmm2, [esi] // _r_b
+ pshufb xmm3, kShuffleAlpha // alpha
+ pand xmm2, xmm6 // _r_b
+ paddw xmm3, xmm7 // 256 - alpha
+ pmullw xmm2, xmm3 // _r_b * alpha
+ movdqu xmm1, [esi] // _a_g
+ lea esi, [esi + 16]
+ psrlw xmm1, 8 // _a_g
+ por xmm0, xmm4 // set alpha to 255
+ pmullw xmm1, xmm3 // _a_g * alpha
+ psrlw xmm2, 8 // _r_b convert to 8 bits again
+ paddusb xmm0, xmm2 // + src argb
+ pand xmm1, xmm5 // a_g_ convert to 8 bits again
+ paddusb xmm0, xmm1 // + src argb
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jge convertloop4
+
+ convertloop4b:
+ add ecx, 4 - 1
+ jl convertloop1b
+
+ // 1 pixel loop.
+ convertloop1:
+ movd xmm3, [eax] // src argb
+ lea eax, [eax + 4]
+ movdqa xmm0, xmm3 // src argb
+ pxor xmm3, xmm4 // ~alpha
+ movd xmm2, [esi] // _r_b
+ pshufb xmm3, kShuffleAlpha // alpha
+ pand xmm2, xmm6 // _r_b
+ paddw xmm3, xmm7 // 256 - alpha
+ pmullw xmm2, xmm3 // _r_b * alpha
+ movd xmm1, [esi] // _a_g
+ lea esi, [esi + 4]
+ psrlw xmm1, 8 // _a_g
+ por xmm0, xmm4 // set alpha to 255
+ pmullw xmm1, xmm3 // _a_g * alpha
+ psrlw xmm2, 8 // _r_b convert to 8 bits again
+ paddusb xmm0, xmm2 // + src argb
+ pand xmm1, xmm5 // a_g_ convert to 8 bits again
+ paddusb xmm0, xmm1 // + src argb
+ movd [edx], xmm0
+ lea edx, [edx + 4]
+ sub ecx, 1
+ jge convertloop1
+
+ convertloop1b:
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBBLENDROW_SSSE3
+
+#ifdef HAS_ARGBATTENUATEROW_SSE2
+// Attenuate 4 pixels at a time.
+__declspec(naked)
+void ARGBAttenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // width
+ pcmpeqb xmm4, xmm4 // generate mask 0xff000000
+ pslld xmm4, 24
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ffffff
+ psrld xmm5, 8
+
+ convertloop:
+ movdqu xmm0, [eax] // read 4 pixels
+ punpcklbw xmm0, xmm0 // first 2
+ pshufhw xmm2, xmm0, 0FFh // 8 alpha words
+ pshuflw xmm2, xmm2, 0FFh
+ pmulhuw xmm0, xmm2 // rgb * a
+ movdqu xmm1, [eax] // read 4 pixels
+ punpckhbw xmm1, xmm1 // next 2 pixels
+ pshufhw xmm2, xmm1, 0FFh // 8 alpha words
+ pshuflw xmm2, xmm2, 0FFh
+ pmulhuw xmm1, xmm2 // rgb * a
+ movdqu xmm2, [eax] // alphas
+ lea eax, [eax + 16]
+ psrlw xmm0, 8
+ pand xmm2, xmm4
+ psrlw xmm1, 8
+ packuswb xmm0, xmm1
+ pand xmm0, xmm5 // keep original alphas
+ por xmm0, xmm2
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg convertloop
+
+ ret
+ }
+}
+#endif // HAS_ARGBATTENUATEROW_SSE2
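+
+// Editor's note: a scalar sketch of the attenuate math above (not upstream
+// code). punpcklbw xmm0, xmm0 widens each byte f to the word f*257
+// (f | f << 8); the shuffled alpha word is a*257; pmulhuw followed by
+// psrlw 8 is therefore a combined >> 24.
+static __inline uint8 Attenuate_Ref(uint32 f, uint32 a) {
+  return (uint8)(((f | (f << 8)) * (a | (a << 8))) >> 24);  // ~ f * a / 255
+}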
+
+#ifdef HAS_ARGBATTENUATEROW_SSSE3
+// Shuffle table duplicating alpha.
+static const uvec8 kShuffleAlpha0 = {
+ 3u, 3u, 3u, 3u, 3u, 3u, 128u, 128u, 7u, 7u, 7u, 7u, 7u, 7u, 128u, 128u,
+};
+static const uvec8 kShuffleAlpha1 = {
+ 11u, 11u, 11u, 11u, 11u, 11u, 128u, 128u,
+ 15u, 15u, 15u, 15u, 15u, 15u, 128u, 128u,
+};
+__declspec(naked)
+void ARGBAttenuateRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // width
+ pcmpeqb xmm3, xmm3 // generate mask 0xff000000
+ pslld xmm3, 24
+ movdqa xmm4, kShuffleAlpha0
+ movdqa xmm5, kShuffleAlpha1
+
+ convertloop:
+ movdqu xmm0, [eax] // read 4 pixels
+ pshufb xmm0, xmm4 // isolate first 2 alphas
+ movdqu xmm1, [eax] // read 4 pixels
+ punpcklbw xmm1, xmm1 // first 2 pixel rgbs
+ pmulhuw xmm0, xmm1 // rgb * a
+ movdqu xmm1, [eax] // read 4 pixels
+ pshufb xmm1, xmm5 // isolate next 2 alphas
+ movdqu xmm2, [eax] // read 4 pixels
+ punpckhbw xmm2, xmm2 // next 2 pixel rgbs
+ pmulhuw xmm1, xmm2 // rgb * a
+ movdqu xmm2, [eax] // mask original alpha
+ lea eax, [eax + 16]
+ pand xmm2, xmm3
+ psrlw xmm0, 8
+ psrlw xmm1, 8
+ packuswb xmm0, xmm1
+ por xmm0, xmm2 // copy original alpha
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg convertloop
+
+ ret
+ }
+}
+#endif // HAS_ARGBATTENUATEROW_SSSE3
+
+#ifdef HAS_ARGBATTENUATEROW_AVX2
+// Shuffle table duplicating alpha.
+static const uvec8 kShuffleAlpha_AVX2 = {
+ 6u, 7u, 6u, 7u, 6u, 7u, 128u, 128u, 14u, 15u, 14u, 15u, 14u, 15u, 128u, 128u
+};
+__declspec(naked)
+void ARGBAttenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb, int width) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // width
+ sub edx, eax
+ vbroadcastf128 ymm4,kShuffleAlpha_AVX2
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0xff000000
+ vpslld ymm5, ymm5, 24
+
+ convertloop:
+ vmovdqu ymm6, [eax] // read 8 pixels.
+ vpunpcklbw ymm0, ymm6, ymm6 // low 4 pixels. mutated.
+ vpunpckhbw ymm1, ymm6, ymm6 // high 4 pixels. mutated.
+ vpshufb ymm2, ymm0, ymm4 // low 4 alphas
+ vpshufb ymm3, ymm1, ymm4 // high 4 alphas
+ vpmulhuw ymm0, ymm0, ymm2 // rgb * a
+ vpmulhuw ymm1, ymm1, ymm3 // rgb * a
+ vpand ymm6, ymm6, ymm5 // isolate alpha
+ vpsrlw ymm0, ymm0, 8
+ vpsrlw ymm1, ymm1, 8
+ vpackuswb ymm0, ymm0, ymm1 // unmutated.
+ vpor ymm0, ymm0, ymm6 // copy original alpha
+ vmovdqu [eax + edx], ymm0
+ lea eax, [eax + 32]
+ sub ecx, 8
+ jg convertloop
+
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBATTENUATEROW_AVX2
+
+#ifdef HAS_ARGBUNATTENUATEROW_SSE2
+// Unattenuate 4 pixels at a time.
+__declspec(naked)
+void ARGBUnattenuateRow_SSE2(const uint8* src_argb, uint8* dst_argb,
+ int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov edx, [esp + 8 + 8] // dst_argb
+ mov ecx, [esp + 8 + 12] // width
+
+ convertloop:
+ movdqu xmm0, [eax] // read 4 pixels
+ movzx esi, byte ptr [eax + 3] // first alpha
+ movzx edi, byte ptr [eax + 7] // second alpha
+ punpcklbw xmm0, xmm0 // first 2
+ movd xmm2, dword ptr fixed_invtbl8[esi * 4]
+ movd xmm3, dword ptr fixed_invtbl8[edi * 4]
+ pshuflw xmm2, xmm2, 040h // first 4 inv_alpha words. 1, a, a, a
+ pshuflw xmm3, xmm3, 040h // next 4 inv_alpha words
+ movlhps xmm2, xmm3
+ pmulhuw xmm0, xmm2 // rgb * a
+
+ movdqu xmm1, [eax] // read 4 pixels
+ movzx esi, byte ptr [eax + 11] // third alpha
+ movzx edi, byte ptr [eax + 15] // fourth alpha
+ punpckhbw xmm1, xmm1 // next 2
+ movd xmm2, dword ptr fixed_invtbl8[esi * 4]
+ movd xmm3, dword ptr fixed_invtbl8[edi * 4]
+ pshuflw xmm2, xmm2, 040h // first 4 inv_alpha words
+ pshuflw xmm3, xmm3, 040h // next 4 inv_alpha words
+ movlhps xmm2, xmm3
+ pmulhuw xmm1, xmm2 // rgb * a
+ lea eax, [eax + 16]
+
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg convertloop
+ pop edi
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBUNATTENUATEROW_SSE2
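+
+// Editor's note: a hedged scalar sketch of the unattenuate math above (not
+// upstream code). It assumes fixed_invtbl8[a] packs a 0.16 fixed-point
+// reciprocal of the alpha in its low word, which is what the "[1,a]"
+// comments above suggest; the exact rounding lives in the table's
+// definition elsewhere in libyuv.
+static __inline uint8 Unattenuate_Ref(uint32 f, uint32 a) {
+  uint32 v = a ? (((f | (f << 8)) * (0x10000u / a)) >> 16) : f;
+  return (uint8)(v > 255 ? 255 : v);  // packuswb saturates the same way
+}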
+
+#ifdef HAS_ARGBUNATTENUATEROW_AVX2
+// Shuffle table duplicating alpha.
+static const uvec8 kUnattenShuffleAlpha_AVX2 = {
+ 0u, 1u, 0u, 1u, 0u, 1u, 6u, 7u, 8u, 9u, 8u, 9u, 8u, 9u, 14u, 15u
+};
+// TODO(fbarchard): Enable USE_GATHER for future hardware if faster.
+// USE_GATHER is not on by default, due to being a slow instruction.
+#ifdef USE_GATHER
+__declspec(naked)
+void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
+ int width) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // width
+ sub edx, eax
+ vbroadcastf128 ymm4, kUnattenShuffleAlpha_AVX2
+
+ convertloop:
+ vmovdqu ymm6, [eax] // read 8 pixels.
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0xffffffff for gather.
+ vpsrld ymm2, ymm6, 24 // alpha in low 8 bits.
+ vpunpcklbw ymm0, ymm6, ymm6 // low 4 pixels. mutated.
+ vpunpckhbw ymm1, ymm6, ymm6 // high 4 pixels. mutated.
+ vpgatherdd ymm3, [ymm2 * 4 + fixed_invtbl8], ymm5 // ymm5 cleared. 1, a
+ vpunpcklwd ymm2, ymm3, ymm3 // low 4 inverted alphas. mutated. 1, 1, a, a
+ vpunpckhwd ymm3, ymm3, ymm3 // high 4 inverted alphas. mutated.
+ vpshufb ymm2, ymm2, ymm4 // replicate low 4 alphas. 1, a, a, a
+ vpshufb ymm3, ymm3, ymm4 // replicate high 4 alphas
+ vpmulhuw ymm0, ymm0, ymm2 // rgb * ia
+ vpmulhuw ymm1, ymm1, ymm3 // rgb * ia
+ vpackuswb ymm0, ymm0, ymm1 // unmutated.
+ vmovdqu [eax + edx], ymm0
+ lea eax, [eax + 32]
+ sub ecx, 8
+ jg convertloop
+
+ vzeroupper
+ ret
+ }
+}
+#else // USE_GATHER
+__declspec(naked)
+void ARGBUnattenuateRow_AVX2(const uint8* src_argb, uint8* dst_argb,
+ int width) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // width
+ sub edx, eax
+ vbroadcastf128 ymm5, kUnattenShuffleAlpha_AVX2
+
+ push esi
+ push edi
+
+ convertloop:
+ // replace VPGATHER
+ movzx esi, byte ptr [eax + 3] // alpha0
+ movzx edi, byte ptr [eax + 7] // alpha1
+ vmovd xmm0, dword ptr fixed_invtbl8[esi * 4] // [1,a0]
+ vmovd xmm1, dword ptr fixed_invtbl8[edi * 4] // [1,a1]
+ movzx esi, byte ptr [eax + 11] // alpha2
+ movzx edi, byte ptr [eax + 15] // alpha3
+ vpunpckldq xmm6, xmm0, xmm1 // [1,a1,1,a0]
+ vmovd xmm2, dword ptr fixed_invtbl8[esi * 4] // [1,a2]
+ vmovd xmm3, dword ptr fixed_invtbl8[edi * 4] // [1,a3]
+ movzx esi, byte ptr [eax + 19] // alpha4
+ movzx edi, byte ptr [eax + 23] // alpha5
+ vpunpckldq xmm7, xmm2, xmm3 // [1,a3,1,a2]
+ vmovd xmm0, dword ptr fixed_invtbl8[esi * 4] // [1,a4]
+ vmovd xmm1, dword ptr fixed_invtbl8[edi * 4] // [1,a5]
+ movzx esi, byte ptr [eax + 27] // alpha6
+ movzx edi, byte ptr [eax + 31] // alpha7
+ vpunpckldq xmm0, xmm0, xmm1 // [1,a5,1,a4]
+ vmovd xmm2, dword ptr fixed_invtbl8[esi * 4] // [1,a6]
+ vmovd xmm3, dword ptr fixed_invtbl8[edi * 4] // [1,a7]
+ vpunpckldq xmm2, xmm2, xmm3 // [1,a7,1,a6]
+ vpunpcklqdq xmm3, xmm6, xmm7 // [1,a3,1,a2,1,a1,1,a0]
+ vpunpcklqdq xmm0, xmm0, xmm2 // [1,a7,1,a6,1,a5,1,a4]
+ vinserti128 ymm3, ymm3, xmm0, 1 // [1,a7,1,a6,1,a5,1,a4,1,a3,1,a2,1,a1,1,a0]
+ // end of VPGATHER
+
+ vmovdqu ymm6, [eax] // read 8 pixels.
+ vpunpcklbw ymm0, ymm6, ymm6 // low 4 pixels. mutated.
+ vpunpckhbw ymm1, ymm6, ymm6 // high 4 pixels. mutated.
+ vpunpcklwd ymm2, ymm3, ymm3 // low 4 inverted alphas. mutated. 1, 1, a, a
+ vpunpckhwd ymm3, ymm3, ymm3 // high 4 inverted alphas. mutated.
+ vpshufb ymm2, ymm2, ymm5 // replicate low 4 alphas. 1, a, a, a
+ vpshufb ymm3, ymm3, ymm5 // replicate high 4 alphas
+ vpmulhuw ymm0, ymm0, ymm2 // rgb * ia
+ vpmulhuw ymm1, ymm1, ymm3 // rgb * ia
+ vpackuswb ymm0, ymm0, ymm1 // unmutated.
+ vmovdqu [eax + edx], ymm0
+ lea eax, [eax + 32]
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // USE_GATHER
+#endif // HAS_ARGBUNATTENUATEROW_AVX2
+
+#ifdef HAS_ARGBGRAYROW_SSSE3
+// Convert 8 ARGB pixels (32 bytes) to 8 Gray ARGB pixels.
+__declspec(naked)
+void ARGBGrayRow_SSSE3(const uint8* src_argb, uint8* dst_argb, int width) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_argb */
+ mov ecx, [esp + 12] /* width */
+ movdqa xmm4, kARGBToYJ
+ movdqa xmm5, kAddYJ64
+
+ convertloop:
+ movdqu xmm0, [eax] // G
+ movdqu xmm1, [eax + 16]
+ pmaddubsw xmm0, xmm4
+ pmaddubsw xmm1, xmm4
+ phaddw xmm0, xmm1
+ paddw xmm0, xmm5 // Add .5 for rounding.
+ psrlw xmm0, 7
+ packuswb xmm0, xmm0 // 8 G bytes
+ movdqu xmm2, [eax] // A
+ movdqu xmm3, [eax + 16]
+ lea eax, [eax + 32]
+ psrld xmm2, 24
+ psrld xmm3, 24
+ packuswb xmm2, xmm3
+ packuswb xmm2, xmm2 // 8 A bytes
+ movdqa xmm3, xmm0 // Weave into GG, GA, then GGGA
+ punpcklbw xmm0, xmm0 // 8 GG words
+ punpcklbw xmm3, xmm2 // 8 GA words
+ movdqa xmm1, xmm0
+ punpcklwd xmm0, xmm3 // GGGA first 4
+ punpckhwd xmm1, xmm3 // GGGA next 4
+ movdqu [edx], xmm0
+ movdqu [edx + 16], xmm1
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_ARGBGRAYROW_SSSE3
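+
+// Editor's note: a scalar sketch of the gray conversion above (not upstream
+// code). The weights are an assumption based on kARGBToYJ being a
+// 128-scaled full-range BT.601 luma (b*15 + g*75 + r*38), with kAddYJ64
+// supplying the +64 rounding term before the >> 7.
+static __inline uint8 GrayY_Ref(uint8 b, uint8 g, uint8 r) {
+  return (uint8)((b * 15 + g * 75 + r * 38 + 64) >> 7);
+}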
+
+#ifdef HAS_ARGBSEPIAROW_SSSE3
+// b = (r * 35 + g * 68 + b * 17) >> 7
+// g = (r * 45 + g * 88 + b * 22) >> 7
+// r = (r * 50 + g * 98 + b * 24) >> 7
+// Constant for ARGB color to sepia tone.
+static const vec8 kARGBToSepiaB = {
+ 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0, 17, 68, 35, 0
+};
+
+static const vec8 kARGBToSepiaG = {
+ 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0, 22, 88, 45, 0
+};
+
+static const vec8 kARGBToSepiaR = {
+ 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0, 24, 98, 50, 0
+};
+
+// Convert 8 ARGB pixels (32 bytes) to 8 Sepia ARGB pixels.
+__declspec(naked)
+void ARGBSepiaRow_SSSE3(uint8* dst_argb, int width) {
+ __asm {
+ mov eax, [esp + 4] /* dst_argb */
+ mov ecx, [esp + 8] /* width */
+ movdqa xmm2, kARGBToSepiaB
+ movdqa xmm3, kARGBToSepiaG
+ movdqa xmm4, kARGBToSepiaR
+
+ convertloop:
+ movdqu xmm0, [eax] // B
+ movdqu xmm6, [eax + 16]
+ pmaddubsw xmm0, xmm2
+ pmaddubsw xmm6, xmm2
+ phaddw xmm0, xmm6
+ psrlw xmm0, 7
+ packuswb xmm0, xmm0 // 8 B values
+ movdqu xmm5, [eax] // G
+ movdqu xmm1, [eax + 16]
+ pmaddubsw xmm5, xmm3
+ pmaddubsw xmm1, xmm3
+ phaddw xmm5, xmm1
+ psrlw xmm5, 7
+ packuswb xmm5, xmm5 // 8 G values
+ punpcklbw xmm0, xmm5 // 8 BG values
+ movdqu xmm5, [eax] // R
+ movdqu xmm1, [eax + 16]
+ pmaddubsw xmm5, xmm4
+ pmaddubsw xmm1, xmm4
+ phaddw xmm5, xmm1
+ psrlw xmm5, 7
+ packuswb xmm5, xmm5 // 8 R values
+ movdqu xmm6, [eax] // A
+ movdqu xmm1, [eax + 16]
+ psrld xmm6, 24
+ psrld xmm1, 24
+ packuswb xmm6, xmm1
+ packuswb xmm6, xmm6 // 8 A values
+ punpcklbw xmm5, xmm6 // 8 RA values
+ movdqa xmm1, xmm0 // Weave BG, RA together
+ punpcklwd xmm0, xmm5 // BGRA first 4
+ punpckhwd xmm1, xmm5 // BGRA next 4
+ movdqu [eax], xmm0
+ movdqu [eax + 16], xmm1
+ lea eax, [eax + 32]
+ sub ecx, 8
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_ARGBSEPIAROW_SSSE3
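+
+// Editor's note: a scalar form of the sepia formulas above, added as a
+// reference sketch (not upstream code). The SSSE3 row computes the same
+// dot products via pmaddubsw with the three constants.
+static void ARGBSepiaPixel_Ref(uint8* p) {
+  int b = p[0], g = p[1], r = p[2];
+  int sb = (r * 35 + g * 68 + b * 17) >> 7;
+  int sg = (r * 45 + g * 88 + b * 22) >> 7;
+  int sr = (r * 50 + g * 98 + b * 24) >> 7;
+  p[0] = (uint8)(sb > 255 ? 255 : sb);  // packuswb saturation
+  p[1] = (uint8)(sg > 255 ? 255 : sg);
+  p[2] = (uint8)(sr > 255 ? 255 : sr);
+}  // the alpha byte p[3] is left untouched, as in the asm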
+
+#ifdef HAS_ARGBCOLORMATRIXROW_SSSE3
+// Transform 8 ARGB pixels (32 bytes) with a color matrix.
+// Same as Sepia except the matrix is provided.
+// TODO(fbarchard): packuswb only uses half of the reg. To make RGBA, combine R
+// and B into a high and low, then G/A, punpckl/hbw and then punpckl/hwd.
+__declspec(naked)
+void ARGBColorMatrixRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ const int8* matrix_argb, int width) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_argb */
+ mov ecx, [esp + 12] /* matrix_argb */
+ movdqu xmm5, [ecx]
+ pshufd xmm2, xmm5, 0x00
+ pshufd xmm3, xmm5, 0x55
+ pshufd xmm4, xmm5, 0xaa
+ pshufd xmm5, xmm5, 0xff
+ mov ecx, [esp + 16] /* width */
+
+ convertloop:
+ movdqu xmm0, [eax] // B
+ movdqu xmm7, [eax + 16]
+ pmaddubsw xmm0, xmm2
+ pmaddubsw xmm7, xmm2
+ movdqu xmm6, [eax] // G
+ movdqu xmm1, [eax + 16]
+ pmaddubsw xmm6, xmm3
+ pmaddubsw xmm1, xmm3
+ phaddsw xmm0, xmm7 // B
+ phaddsw xmm6, xmm1 // G
+ psraw xmm0, 6 // B
+ psraw xmm6, 6 // G
+ packuswb xmm0, xmm0 // 8 B values
+ packuswb xmm6, xmm6 // 8 G values
+ punpcklbw xmm0, xmm6 // 8 BG values
+ movdqu xmm1, [eax] // R
+ movdqu xmm7, [eax + 16]
+ pmaddubsw xmm1, xmm4
+ pmaddubsw xmm7, xmm4
+ phaddsw xmm1, xmm7 // R
+ movdqu xmm6, [eax] // A
+ movdqu xmm7, [eax + 16]
+ pmaddubsw xmm6, xmm5
+ pmaddubsw xmm7, xmm5
+ phaddsw xmm6, xmm7 // A
+ psraw xmm1, 6 // R
+ psraw xmm6, 6 // A
+ packuswb xmm1, xmm1 // 8 R values
+ packuswb xmm6, xmm6 // 8 A values
+ punpcklbw xmm1, xmm6 // 8 RA values
+ movdqa xmm6, xmm0 // Weave BG, RA together
+ punpcklwd xmm0, xmm1 // BGRA first 4
+ punpckhwd xmm6, xmm1 // BGRA next 4
+ movdqu [edx], xmm0
+ movdqu [edx + 16], xmm6
+ lea eax, [eax + 32]
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_ARGBCOLORMATRIXROW_SSSE3
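+
+// Editor's note: a scalar sketch of the color-matrix transform above (not
+// upstream code). Each output channel is the signed dot product of the
+// BGRA input with one row of matrix_argb, shifted down 6 bits and
+// saturated, matching pmaddubsw/phaddsw/psraw/packuswb.
+static void ARGBColorMatrixPixel_Ref(const uint8* src, uint8* dst,
+                                     const int8* m) {
+  int i;
+  for (i = 0; i < 4; ++i) {
+    int v = (src[0] * m[i * 4 + 0] + src[1] * m[i * 4 + 1] +
+             src[2] * m[i * 4 + 2] + src[3] * m[i * 4 + 3]) >> 6;
+    dst[i] = (uint8)(v < 0 ? 0 : (v > 255 ? 255 : v));
+  }
+}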
+
+#ifdef HAS_ARGBQUANTIZEROW_SSE2
+// Quantize 4 ARGB pixels (16 bytes).
+__declspec(naked)
+void ARGBQuantizeRow_SSE2(uint8* dst_argb, int scale, int interval_size,
+ int interval_offset, int width) {
+ __asm {
+ mov eax, [esp + 4] /* dst_argb */
+ movd xmm2, [esp + 8] /* scale */
+ movd xmm3, [esp + 12] /* interval_size */
+ movd xmm4, [esp + 16] /* interval_offset */
+ mov ecx, [esp + 20] /* width */
+ pshuflw xmm2, xmm2, 040h
+ pshufd xmm2, xmm2, 044h
+ pshuflw xmm3, xmm3, 040h
+ pshufd xmm3, xmm3, 044h
+ pshuflw xmm4, xmm4, 040h
+ pshufd xmm4, xmm4, 044h
+ pxor xmm5, xmm5 // constant 0
+ pcmpeqb xmm6, xmm6 // generate mask 0xff000000
+ pslld xmm6, 24
+
+ convertloop:
+ movdqu xmm0, [eax] // read 4 pixels
+ punpcklbw xmm0, xmm5 // first 2 pixels
+ pmulhuw xmm0, xmm2 // pixel * scale >> 16
+ movdqu xmm1, [eax] // read 4 pixels
+ punpckhbw xmm1, xmm5 // next 2 pixels
+ pmulhuw xmm1, xmm2
+ pmullw xmm0, xmm3 // * interval_size
+ movdqu xmm7, [eax] // read 4 pixels
+ pmullw xmm1, xmm3
+ pand xmm7, xmm6 // mask alpha
+ paddw xmm0, xmm4 // + interval_offset (typically interval_size / 2)
+ paddw xmm1, xmm4
+ packuswb xmm0, xmm1
+ por xmm0, xmm7
+ movdqu [eax], xmm0
+ lea eax, [eax + 16]
+ sub ecx, 4
+ jg convertloop
+ ret
+ }
+}
+#endif // HAS_ARGBQUANTIZEROW_SSE2
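+
+// Editor's note: a scalar sketch of the quantize step above (not upstream
+// code). scale is a caller-chosen 16.16 multiplier, so (v * scale) >> 16
+// selects a bucket, which is then mapped back to a representative level.
+static __inline uint8 Quantize_Ref(uint32 v, uint32 scale,
+                                   int interval_size, int interval_offset) {
+  return (uint8)(((v * scale) >> 16) * interval_size + interval_offset);
+}  // applied to B, G, R; the alpha byte is preserved via the 0xff000000 mask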
+
+#ifdef HAS_ARGBSHADEROW_SSE2
+// Shade 4 pixels at a time by specified value.
+__declspec(naked)
+void ARGBShadeRow_SSE2(const uint8* src_argb, uint8* dst_argb, int width,
+ uint32 value) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // width
+ movd xmm2, [esp + 16] // value
+ punpcklbw xmm2, xmm2
+ punpcklqdq xmm2, xmm2
+
+ convertloop:
+ movdqu xmm0, [eax] // read 4 pixels
+ lea eax, [eax + 16]
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm0 // first 2
+ punpckhbw xmm1, xmm1 // next 2
+ pmulhuw xmm0, xmm2 // argb * value
+ pmulhuw xmm1, xmm2 // argb * value
+ psrlw xmm0, 8
+ psrlw xmm1, 8
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg convertloop
+
+ ret
+ }
+}
+#endif // HAS_ARGBSHADEROW_SSE2
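+
+// Editor's note: a scalar sketch of the shade math above (not upstream
+// code); it is the attenuate formula with the per-channel multiplier taken
+// from the packed 'value' argument instead of the pixel's alpha.
+static __inline uint8 Shade_Ref(uint32 f, uint32 v) {
+  return (uint8)(((f | (f << 8)) * (v | (v << 8))) >> 24);  // ~ f * v / 255
+}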
+
+#ifdef HAS_ARGBMULTIPLYROW_SSE2
+// Multiply 2 rows of ARGB pixels together, 4 pixels at a time.
+__declspec(naked)
+void ARGBMultiplyRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb0
+ mov esi, [esp + 4 + 8] // src_argb1
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+ pxor xmm5, xmm5 // constant 0
+
+ convertloop:
+ movdqu xmm0, [eax] // read 4 pixels from src_argb0
+ movdqu xmm2, [esi] // read 4 pixels from src_argb1
+ movdqu xmm1, xmm0
+ movdqu xmm3, xmm2
+ punpcklbw xmm0, xmm0 // first 2
+ punpckhbw xmm1, xmm1 // next 2
+ punpcklbw xmm2, xmm5 // first 2
+ punpckhbw xmm3, xmm5 // next 2
+ pmulhuw xmm0, xmm2 // src_argb0 * src_argb1 first 2
+ pmulhuw xmm1, xmm3 // src_argb0 * src_argb1 next 2
+ lea eax, [eax + 16]
+ lea esi, [esi + 16]
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBMULTIPLYROW_SSE2
+
+#ifdef HAS_ARGBADDROW_SSE2
+// Add 2 rows of ARGB pixels together, 4 pixels at a time.
+// TODO(fbarchard): Port this to posix, neon and other math functions.
+__declspec(naked)
+void ARGBAddRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb0
+ mov esi, [esp + 4 + 8] // src_argb1
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+
+ sub ecx, 4
+ jl convertloop49
+
+ convertloop4:
+ movdqu xmm0, [eax] // read 4 pixels from src_argb0
+ lea eax, [eax + 16]
+ movdqu xmm1, [esi] // read 4 pixels from src_argb1
+ lea esi, [esi + 16]
+ paddusb xmm0, xmm1 // src_argb0 + src_argb1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jge convertloop4
+
+ convertloop49:
+ add ecx, 4 - 1
+ jl convertloop19
+
+ convertloop1:
+ movd xmm0, [eax] // read 1 pixel from src_argb0
+ lea eax, [eax + 4]
+ movd xmm1, [esi] // read 1 pixel from src_argb1
+ lea esi, [esi + 4]
+ paddusb xmm0, xmm1 // src_argb0 + src_argb1
+ movd [edx], xmm0
+ lea edx, [edx + 4]
+ sub ecx, 1
+ jge convertloop1
+
+ convertloop19:
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBADDROW_SSE2
+
+#ifdef HAS_ARGBSUBTRACTROW_SSE2
+// Subtract 2 rows of ARGB pixels together, 4 pixels at a time.
+__declspec(naked)
+void ARGBSubtractRow_SSE2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb0
+ mov esi, [esp + 4 + 8] // src_argb1
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+
+ convertloop:
+ movdqu xmm0, [eax] // read 4 pixels from src_argb0
+ lea eax, [eax + 16]
+ movdqu xmm1, [esi] // read 4 pixels from src_argb1
+ lea esi, [esi + 16]
+ psubusb xmm0, xmm1 // src_argb0 - src_argb1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBSUBTRACTROW_SSE2
+
+#ifdef HAS_ARGBMULTIPLYROW_AVX2
+// Multiply 2 rows of ARGB pixels together, 8 pixels at a time.
+__declspec(naked)
+void ARGBMultiplyRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb0
+ mov esi, [esp + 4 + 8] // src_argb1
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+ vpxor ymm5, ymm5, ymm5 // constant 0
+
+ convertloop:
+ vmovdqu ymm1, [eax] // read 8 pixels from src_argb0
+ lea eax, [eax + 32]
+ vmovdqu ymm3, [esi] // read 8 pixels from src_argb1
+ lea esi, [esi + 32]
+ vpunpcklbw ymm0, ymm1, ymm1 // low 4
+ vpunpckhbw ymm1, ymm1, ymm1 // high 4
+ vpunpcklbw ymm2, ymm3, ymm5 // low 4
+ vpunpckhbw ymm3, ymm3, ymm5 // high 4
+ vpmulhuw ymm0, ymm0, ymm2 // src_argb0 * src_argb1 low 4
+ vpmulhuw ymm1, ymm1, ymm3 // src_argb0 * src_argb1 high 4
+ vpackuswb ymm0, ymm0, ymm1
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBMULTIPLYROW_AVX2
+
+#ifdef HAS_ARGBADDROW_AVX2
+// Add 2 rows of ARGB pixels together, 8 pixels at a time.
+__declspec(naked)
+void ARGBAddRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb0
+ mov esi, [esp + 4 + 8] // src_argb1
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+
+ convertloop:
+ vmovdqu ymm0, [eax] // read 8 pixels from src_argb0
+ lea eax, [eax + 32]
+ vpaddusb ymm0, ymm0, [esi] // add 8 pixels from src_argb1
+ lea esi, [esi + 32]
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBADDROW_AVX2
+
+#ifdef HAS_ARGBSUBTRACTROW_AVX2
+// Subtract 2 rows of ARGB pixels together, 8 pixels at a time.
+__declspec(naked)
+void ARGBSubtractRow_AVX2(const uint8* src_argb0, const uint8* src_argb1,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb0
+ mov esi, [esp + 4 + 8] // src_argb1
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+
+ convertloop:
+ vmovdqu ymm0, [eax] // read 8 pixels from src_argb0
+ lea eax, [eax + 32]
+ vpsubusb ymm0, ymm0, [esi] // src_argb0 - src_argb1
+ lea esi, [esi + 32]
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg convertloop
+
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBSUBTRACTROW_AVX2
+
+#ifdef HAS_SOBELXROW_SSE2
+// SobelX as a matrix is
+// -1 0 1
+// -2 0 2
+// -1 0 1
+__declspec(naked)
+void SobelXRow_SSE2(const uint8* src_y0, const uint8* src_y1,
+ const uint8* src_y2, uint8* dst_sobelx, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_y0
+ mov esi, [esp + 8 + 8] // src_y1
+ mov edi, [esp + 8 + 12] // src_y2
+ mov edx, [esp + 8 + 16] // dst_sobelx
+ mov ecx, [esp + 8 + 20] // width
+ sub esi, eax
+ sub edi, eax
+ sub edx, eax
+ pxor xmm5, xmm5 // constant 0
+
+ convertloop:
+ movq xmm0, qword ptr [eax] // read 8 pixels from src_y0[0]
+ movq xmm1, qword ptr [eax + 2] // read 8 pixels from src_y0[2]
+ punpcklbw xmm0, xmm5
+ punpcklbw xmm1, xmm5
+ psubw xmm0, xmm1
+ movq xmm1, qword ptr [eax + esi] // read 8 pixels from src_y1[0]
+ movq xmm2, qword ptr [eax + esi + 2] // read 8 pixels from src_y1[2]
+ punpcklbw xmm1, xmm5
+ punpcklbw xmm2, xmm5
+ psubw xmm1, xmm2
+ movq xmm2, qword ptr [eax + edi] // read 8 pixels from src_y2[0]
+ movq xmm3, qword ptr [eax + edi + 2] // read 8 pixels from src_y2[2]
+ punpcklbw xmm2, xmm5
+ punpcklbw xmm3, xmm5
+ psubw xmm2, xmm3
+ paddw xmm0, xmm2
+ paddw xmm0, xmm1
+ paddw xmm0, xmm1
+ pxor xmm1, xmm1 // abs = max(xmm0, -xmm0). SSSE3 could use pabsw
+ psubw xmm1, xmm0
+ pmaxsw xmm0, xmm1
+ packuswb xmm0, xmm0
+ movq qword ptr [eax + edx], xmm0
+ lea eax, [eax + 8]
+ sub ecx, 8
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+#endif // HAS_SOBELXROW_SSE2
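+
+// Editor's note: a scalar sketch of one SobelX output above (not upstream
+// code); SobelY below is the same tap with rows and columns exchanged.
+static __inline uint8 SobelXPixel_Ref(const uint8* y0, const uint8* y1,
+                                      const uint8* y2, int i) {
+  int s = (y0[i] - y0[i + 2]) + 2 * (y1[i] - y1[i + 2]) +
+          (y2[i] - y2[i + 2]);
+  if (s < 0) s = -s;                  // abs = max(x, -x), as in the asm
+  return (uint8)(s > 255 ? 255 : s);  // packuswb saturation
+}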
+
+#ifdef HAS_SOBELYROW_SSE2
+// SobelY as a matrix is
+// -1 -2 -1
+// 0 0 0
+// 1 2 1
+__declspec(naked)
+void SobelYRow_SSE2(const uint8* src_y0, const uint8* src_y1,
+ uint8* dst_sobely, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_y0
+ mov esi, [esp + 4 + 8] // src_y1
+ mov edx, [esp + 4 + 12] // dst_sobely
+ mov ecx, [esp + 4 + 16] // width
+ sub esi, eax
+ sub edx, eax
+ pxor xmm5, xmm5 // constant 0
+
+ convertloop:
+ movq xmm0, qword ptr [eax] // read 8 pixels from src_y0[0]
+ movq xmm1, qword ptr [eax + esi] // read 8 pixels from src_y1[0]
+ punpcklbw xmm0, xmm5
+ punpcklbw xmm1, xmm5
+ psubw xmm0, xmm1
+ movq xmm1, qword ptr [eax + 1] // read 8 pixels from src_y0[1]
+ movq xmm2, qword ptr [eax + esi + 1] // read 8 pixels from src_y1[1]
+ punpcklbw xmm1, xmm5
+ punpcklbw xmm2, xmm5
+ psubw xmm1, xmm2
+ movq xmm2, qword ptr [eax + 2] // read 8 pixels from src_y0[2]
+ movq xmm3, qword ptr [eax + esi + 2] // read 8 pixels from src_y1[2]
+ punpcklbw xmm2, xmm5
+ punpcklbw xmm3, xmm5
+ psubw xmm2, xmm3
+ paddw xmm0, xmm2
+ paddw xmm0, xmm1
+ paddw xmm0, xmm1
+ pxor xmm1, xmm1 // abs = max(xmm0, -xmm0). SSSE3 could use pabsw
+ psubw xmm1, xmm0
+ pmaxsw xmm0, xmm1
+ packuswb xmm0, xmm0
+ movq qword ptr [eax + edx], xmm0
+ lea eax, [eax + 8]
+ sub ecx, 8
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+#endif // HAS_SOBELYROW_SSE2
+
+#ifdef HAS_SOBELROW_SSE2
+// Adds Sobel X and Sobel Y and stores Sobel into ARGB.
+// A = 255
+// R = Sobel
+// G = Sobel
+// B = Sobel
+__declspec(naked)
+void SobelRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_sobelx
+ mov esi, [esp + 4 + 8] // src_sobely
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+ sub esi, eax
+ pcmpeqb xmm5, xmm5 // alpha 255
+ pslld xmm5, 24 // 0xff000000
+
+ convertloop:
+ movdqu xmm0, [eax] // read 16 pixels src_sobelx
+ movdqu xmm1, [eax + esi] // read 16 pixels src_sobely
+ lea eax, [eax + 16]
+ paddusb xmm0, xmm1 // sobel = sobelx + sobely
+ movdqa xmm2, xmm0 // GG
+ punpcklbw xmm2, xmm0 // First 8
+ punpckhbw xmm0, xmm0 // Next 8
+ movdqa xmm1, xmm2 // GGGG
+ punpcklwd xmm1, xmm2 // First 4
+ punpckhwd xmm2, xmm2 // Next 4
+ por xmm1, xmm5 // GGGA
+ por xmm2, xmm5
+ movdqa xmm3, xmm0 // GGGG
+ punpcklwd xmm3, xmm0 // Next 4
+ punpckhwd xmm0, xmm0 // Last 4
+ por xmm3, xmm5 // GGGA
+ por xmm0, xmm5
+ movdqu [edx], xmm1
+ movdqu [edx + 16], xmm2
+ movdqu [edx + 32], xmm3
+ movdqu [edx + 48], xmm0
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+#endif // HAS_SOBELROW_SSE2
+
+#ifdef HAS_SOBELTOPLANEROW_SSE2
+// Adds Sobel X and Sobel Y and stores Sobel into a plane.
+__declspec(naked)
+void SobelToPlaneRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_y, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_sobelx
+ mov esi, [esp + 4 + 8] // src_sobely
+ mov edx, [esp + 4 + 12] // dst_y
+ mov ecx, [esp + 4 + 16] // width
+ sub esi, eax
+
+ convertloop:
+ movdqu xmm0, [eax] // read 16 pixels src_sobelx
+ movdqu xmm1, [eax + esi] // read 16 pixels src_sobely
+ lea eax, [eax + 16]
+ paddusb xmm0, xmm1 // sobel = sobelx + sobely
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+#endif // HAS_SOBELTOPLANEROW_SSE2
+
+#ifdef HAS_SOBELXYROW_SSE2
+// Mixes Sobel X, Sobel Y and Sobel into ARGB.
+// A = 255
+// R = Sobel X
+// G = Sobel
+// B = Sobel Y
+__declspec(naked)
+void SobelXYRow_SSE2(const uint8* src_sobelx, const uint8* src_sobely,
+ uint8* dst_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_sobelx
+ mov esi, [esp + 4 + 8] // src_sobely
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // width
+ sub esi, eax
+ pcmpeqb xmm5, xmm5 // alpha 255
+
+ convertloop:
+ movdqu xmm0, [eax] // read 16 pixels src_sobelx
+ movdqu xmm1, [eax + esi] // read 16 pixels src_sobely
+ lea eax, [eax + 16]
+ movdqa xmm2, xmm0
+ paddusb xmm2, xmm1 // sobel = sobelx + sobely
+ movdqa xmm3, xmm0 // XA
+ punpcklbw xmm3, xmm5
+ punpckhbw xmm0, xmm5
+ movdqa xmm4, xmm1 // YS
+ punpcklbw xmm4, xmm2
+ punpckhbw xmm1, xmm2
+ movdqa xmm6, xmm4 // YSXA
+ punpcklwd xmm6, xmm3 // First 4
+ punpckhwd xmm4, xmm3 // Next 4
+ movdqa xmm7, xmm1 // YSXA
+ punpcklwd xmm7, xmm0 // Next 4
+ punpckhwd xmm1, xmm0 // Last 4
+ movdqu [edx], xmm6
+ movdqu [edx + 16], xmm4
+ movdqu [edx + 32], xmm7
+ movdqu [edx + 48], xmm1
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+#endif // HAS_SOBELXYROW_SSE2
+
+#ifdef HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
+// Consider float CumulativeSum.
+// Consider calling CumulativeSum one row at a time as needed.
+// Consider circular CumulativeSum buffer of radius * 2 + 1 height.
+// Convert cumulative sum for an area to an average for 1 pixel.
+// topleft is pointer to top left of CumulativeSum buffer for area.
+// botleft is pointer to bottom left of CumulativeSum buffer.
+// width is offset from left to right of area in CumulativeSum buffer measured
+// in number of ints.
+// area is the number of pixels in the area being averaged.
+// dst points to pixel to store result to.
+// count is number of averaged pixels to produce.
+// Does 4 pixels at a time.
+void CumulativeSumToAverageRow_SSE2(const int32* topleft, const int32* botleft,
+ int width, int area, uint8* dst,
+ int count) {
+ __asm {
+ mov eax, topleft // eax topleft
+ mov esi, botleft // esi botleft
+ mov edx, width
+ movd xmm5, area
+ mov edi, dst
+ mov ecx, count
+ cvtdq2ps xmm5, xmm5
+ rcpss xmm4, xmm5 // 1.0f / area
+ pshufd xmm4, xmm4, 0
+ sub ecx, 4
+ jl l4b
+
+ cmp area, 128 // 128 pixels will not overflow 15 bits.
+ ja l4
+
+ pshufd xmm5, xmm5, 0 // area
+ pcmpeqb xmm6, xmm6 // constant of 65536.0 - 1 = 65535.0
+ psrld xmm6, 16
+ cvtdq2ps xmm6, xmm6
+ addps xmm5, xmm6 // (65536.0 + area - 1)
+ mulps xmm5, xmm4 // (65536.0 + area - 1) * 1 / area
+ cvtps2dq xmm5, xmm5 // 0.16 fixed point
+ packssdw xmm5, xmm5 // 16 bit shorts
+
+ // 4 pixel loop small blocks.
+ s4:
+ // top left
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+
+ // - top right
+ psubd xmm0, [eax + edx * 4]
+ psubd xmm1, [eax + edx * 4 + 16]
+ psubd xmm2, [eax + edx * 4 + 32]
+ psubd xmm3, [eax + edx * 4 + 48]
+ lea eax, [eax + 64]
+
+ // - bottom left
+ psubd xmm0, [esi]
+ psubd xmm1, [esi + 16]
+ psubd xmm2, [esi + 32]
+ psubd xmm3, [esi + 48]
+
+ // + bottom right
+ paddd xmm0, [esi + edx * 4]
+ paddd xmm1, [esi + edx * 4 + 16]
+ paddd xmm2, [esi + edx * 4 + 32]
+ paddd xmm3, [esi + edx * 4 + 48]
+ lea esi, [esi + 64]
+
+ packssdw xmm0, xmm1 // pack 4 pixels into 2 registers
+ packssdw xmm2, xmm3
+
+ pmulhuw xmm0, xmm5
+ pmulhuw xmm2, xmm5
+
+ packuswb xmm0, xmm2
+ movdqu [edi], xmm0
+ lea edi, [edi + 16]
+ sub ecx, 4
+ jge s4
+
+ jmp l4b
+
+ // 4 pixel loop
+ l4:
+ // top left
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + 32]
+ movdqu xmm3, [eax + 48]
+
+ // - top right
+ psubd xmm0, [eax + edx * 4]
+ psubd xmm1, [eax + edx * 4 + 16]
+ psubd xmm2, [eax + edx * 4 + 32]
+ psubd xmm3, [eax + edx * 4 + 48]
+ lea eax, [eax + 64]
+
+ // - bottom left
+ psubd xmm0, [esi]
+ psubd xmm1, [esi + 16]
+ psubd xmm2, [esi + 32]
+ psubd xmm3, [esi + 48]
+
+ // + bottom right
+ paddd xmm0, [esi + edx * 4]
+ paddd xmm1, [esi + edx * 4 + 16]
+ paddd xmm2, [esi + edx * 4 + 32]
+ paddd xmm3, [esi + edx * 4 + 48]
+ lea esi, [esi + 64]
+
+ cvtdq2ps xmm0, xmm0 // Average = Sum * 1 / Area
+ cvtdq2ps xmm1, xmm1
+ mulps xmm0, xmm4
+ mulps xmm1, xmm4
+ cvtdq2ps xmm2, xmm2
+ cvtdq2ps xmm3, xmm3
+ mulps xmm2, xmm4
+ mulps xmm3, xmm4
+ cvtps2dq xmm0, xmm0
+ cvtps2dq xmm1, xmm1
+ cvtps2dq xmm2, xmm2
+ cvtps2dq xmm3, xmm3
+ packssdw xmm0, xmm1
+ packssdw xmm2, xmm3
+ packuswb xmm0, xmm2
+ movdqu [edi], xmm0
+ lea edi, [edi + 16]
+ sub ecx, 4
+ jge l4
+
+ l4b:
+ add ecx, 4 - 1
+ jl l1b
+
+ // 1 pixel loop
+ l1:
+ movdqu xmm0, [eax]
+ psubd xmm0, [eax + edx * 4]
+ lea eax, [eax + 16]
+ psubd xmm0, [esi]
+ paddd xmm0, [esi + edx * 4]
+ lea esi, [esi + 16]
+ cvtdq2ps xmm0, xmm0
+ mulps xmm0, xmm4
+ cvtps2dq xmm0, xmm0
+ packssdw xmm0, xmm0
+ packuswb xmm0, xmm0
+ movd dword ptr [edi], xmm0
+ lea edi, [edi + 4]
+ sub ecx, 1
+ jge l1
+ l1b:
+ }
+}
+#endif // HAS_CUMULATIVESUMTOAVERAGEROW_SSE2
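+
+// Editor's note: a scalar sketch of the area average above (not upstream
+// code). Each pixel is four int32 channel sums; 'width' is the area width
+// in ints, as documented, so the four corner reads below mirror the asm.
+static void CumulativeSumToAveragePixel_Ref(const int32* topleft,
+                                            const int32* botleft,
+                                            int width, int area, uint8* dst) {
+  int i;
+  for (i = 0; i < 4; ++i) {  // one int32 lane per channel
+    int32 sum = topleft[i] - topleft[i + width] -
+                botleft[i] + botleft[i + width];
+    dst[i] = (uint8)(sum / area);  // the asm uses 1/area or 0.16 fixed point
+  }
+}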
+
+#ifdef HAS_COMPUTECUMULATIVESUMROW_SSE2
+// Creates a table of cumulative sums where each value is a sum of all values
+// above and to the left of the value.
+void ComputeCumulativeSumRow_SSE2(const uint8* row, int32* cumsum,
+ const int32* previous_cumsum, int width) {
+ __asm {
+ mov eax, row
+ mov edx, cumsum
+ mov esi, previous_cumsum
+ mov ecx, width
+ pxor xmm0, xmm0
+ pxor xmm1, xmm1
+
+ sub ecx, 4
+ jl l4b
+ test edx, 15
+ jne l4b
+
+ // 4 pixel loop
+ l4:
+ movdqu xmm2, [eax] // 4 argb pixels 16 bytes.
+ lea eax, [eax + 16]
+ movdqa xmm4, xmm2
+
+ punpcklbw xmm2, xmm1
+ movdqa xmm3, xmm2
+ punpcklwd xmm2, xmm1
+ punpckhwd xmm3, xmm1
+
+ punpckhbw xmm4, xmm1
+ movdqa xmm5, xmm4
+ punpcklwd xmm4, xmm1
+ punpckhwd xmm5, xmm1
+
+ paddd xmm0, xmm2
+ movdqu xmm2, [esi] // previous row above.
+ paddd xmm2, xmm0
+
+ paddd xmm0, xmm3
+ movdqu xmm3, [esi + 16]
+ paddd xmm3, xmm0
+
+ paddd xmm0, xmm4
+ movdqu xmm4, [esi + 32]
+ paddd xmm4, xmm0
+
+ paddd xmm0, xmm5
+ movdqu xmm5, [esi + 48]
+ lea esi, [esi + 64]
+ paddd xmm5, xmm0
+
+ movdqu [edx], xmm2
+ movdqu [edx + 16], xmm3
+ movdqu [edx + 32], xmm4
+ movdqu [edx + 48], xmm5
+
+ lea edx, [edx + 64]
+ sub ecx, 4
+ jge l4
+
+ l4b:
+ add ecx, 4 - 1
+ jl l1b
+
+ // 1 pixel loop
+ l1:
+ movd xmm2, dword ptr [eax] // 1 argb pixel 4 bytes.
+ lea eax, [eax + 4]
+ punpcklbw xmm2, xmm1
+ punpcklwd xmm2, xmm1
+ paddd xmm0, xmm2
+ movdqu xmm2, [esi]
+ lea esi, [esi + 16]
+ paddd xmm2, xmm0
+ movdqu [edx], xmm2
+ lea edx, [edx + 16]
+ sub ecx, 1
+ jge l1
+
+ l1b:
+ }
+}
+#endif // HAS_COMPUTECUMULATIVESUMROW_SSE2
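+
+// Editor's note: a scalar sketch of the recurrence above (not upstream
+// code): each output is this row's running channel total plus the entry
+// directly above it in previous_cumsum.
+static void ComputeCumulativeSumRow_Ref(const uint8* row, int32* cumsum,
+                                        const int32* previous_cumsum,
+                                        int width) {
+  int32 sum[4] = {0, 0, 0, 0};  // running B, G, R, A totals for this row
+  int x, i;
+  for (x = 0; x < width; ++x) {
+    for (i = 0; i < 4; ++i) {
+      sum[i] += row[x * 4 + i];
+      cumsum[x * 4 + i] = sum[i] + previous_cumsum[x * 4 + i];
+    }
+  }
+}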
+
+#ifdef HAS_ARGBAFFINEROW_SSE2
+// Copy ARGB pixels from source image with slope to a row of destination.
+__declspec(naked)
+LIBYUV_API
+void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
+ uint8* dst_argb, const float* uv_dudv, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 12] // src_argb
+ mov esi, [esp + 16] // stride
+ mov edx, [esp + 20] // dst_argb
+ mov ecx, [esp + 24] // pointer to uv_dudv
+ movq xmm2, qword ptr [ecx] // uv
+ movq xmm7, qword ptr [ecx + 8] // dudv
+ mov ecx, [esp + 28] // width
+ shl esi, 16 // 4, stride
+ add esi, 4
+ movd xmm5, esi
+ sub ecx, 4
+ jl l4b
+
+ // setup for 4 pixel loop
+ pshufd xmm7, xmm7, 0x44 // dup dudv
+ pshufd xmm5, xmm5, 0 // dup 4, stride
+ movdqa xmm0, xmm2 // x0, y0, x1, y1
+ addps xmm0, xmm7
+ movlhps xmm2, xmm0
+ movdqa xmm4, xmm7
+ addps xmm4, xmm4 // dudv *= 2
+ movdqa xmm3, xmm2 // x2, y2, x3, y3
+ addps xmm3, xmm4
+ addps xmm4, xmm4 // dudv *= 4
+
+ // 4 pixel loop
+ l4:
+ cvttps2dq xmm0, xmm2 // x, y float to int first 2
+ cvttps2dq xmm1, xmm3 // x, y float to int next 2
+ packssdw xmm0, xmm1 // x, y as 8 shorts
+ pmaddwd xmm0, xmm5 // offsets = x * 4 + y * stride.
+ movd esi, xmm0
+ pshufd xmm0, xmm0, 0x39 // shift right
+ movd edi, xmm0
+ pshufd xmm0, xmm0, 0x39 // shift right
+ movd xmm1, [eax + esi] // read pixel 0
+ movd xmm6, [eax + edi] // read pixel 1
+ punpckldq xmm1, xmm6 // combine pixel 0 and 1
+ addps xmm2, xmm4 // x, y += dx, dy first 2
+ movq qword ptr [edx], xmm1
+ movd esi, xmm0
+ pshufd xmm0, xmm0, 0x39 // shift right
+ movd edi, xmm0
+ movd xmm6, [eax + esi] // read pixel 2
+ movd xmm0, [eax + edi] // read pixel 3
+ punpckldq xmm6, xmm0 // combine pixel 2 and 3
+ addps xmm3, xmm4 // x, y += dx, dy next 2
+ movq qword ptr [edx + 8], xmm6
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jge l4
+
+ l4b:
+ add ecx, 4 - 1
+ jl l1b
+
+ // 1 pixel loop
+ l1:
+ cvttps2dq xmm0, xmm2 // x, y float to int
+ packssdw xmm0, xmm0 // x, y as shorts
+ pmaddwd xmm0, xmm5 // offset = x * 4 + y * stride
+ addps xmm2, xmm7 // x, y += dx, dy
+ movd esi, xmm0
+ movd xmm0, [eax + esi] // copy a pixel
+ movd [edx], xmm0
+ lea edx, [edx + 4]
+ sub ecx, 1
+ jge l1
+ l1b:
+ pop edi
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBAFFINEROW_SSE2
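+
+// Editor's note: a scalar sketch of the affine walk above (not upstream
+// code). uv_dudv holds the start (u,v) and per-pixel step (du,dv); each
+// source offset is x * 4 + y * stride, exactly what the pmaddwd against
+// the packed (4, stride) words computes.
+static void ARGBAffineRow_Ref(const uint8* src_argb, int src_argb_stride,
+                              uint8* dst_argb, const float* uv_dudv,
+                              int width) {
+  float u = uv_dudv[0], v = uv_dudv[1];
+  int i;
+  for (i = 0; i < width; ++i) {
+    int x = (int)u, y = (int)v;  // cvttps2dq truncates the same way
+    *(uint32*)(dst_argb + i * 4) =
+        *(const uint32*)(src_argb + x * 4 + y * src_argb_stride);
+    u += uv_dudv[2];
+    v += uv_dudv[3];
+  }
+}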
+
+#ifdef HAS_INTERPOLATEROW_AVX2
+// Bilinear filter 32x2 -> 32x1
+__declspec(naked)
+void InterpolateRow_AVX2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) {
+ __asm {
+ push esi
+ push edi
+ mov edi, [esp + 8 + 4] // dst_ptr
+ mov esi, [esp + 8 + 8] // src_ptr
+ mov edx, [esp + 8 + 12] // src_stride
+ mov ecx, [esp + 8 + 16] // dst_width
+ mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
+ shr eax, 1
+ // Dispatch to specialized filters if applicable.
+ cmp eax, 0
+ je xloop100 // 0 / 128. Blend 100 / 0.
+ sub edi, esi
+ cmp eax, 32
+ je xloop75 // 32 / 128 is 0.25. Blend 75 / 25.
+ cmp eax, 64
+ je xloop50 // 64 / 128 is 0.50. Blend 50 / 50.
+ cmp eax, 96
+ je xloop25 // 96 / 128 is 0.75. Blend 25 / 75.
+
+ vmovd xmm0, eax // high fraction 0..127
+ neg eax
+ add eax, 128
+ vmovd xmm5, eax // low fraction 128..1
+ vpunpcklbw xmm5, xmm5, xmm0
+ vpunpcklwd xmm5, xmm5, xmm5
+ vpxor ymm0, ymm0, ymm0
+ vpermd ymm5, ymm0, ymm5
+
+ xloop:
+ vmovdqu ymm0, [esi]
+ vmovdqu ymm2, [esi + edx]
+ vpunpckhbw ymm1, ymm0, ymm2 // mutates
+ vpunpcklbw ymm0, ymm0, ymm2 // mutates
+ vpmaddubsw ymm0, ymm0, ymm5
+ vpmaddubsw ymm1, ymm1, ymm5
+ vpsrlw ymm0, ymm0, 7
+ vpsrlw ymm1, ymm1, 7
+ vpackuswb ymm0, ymm0, ymm1 // unmutates
+ vmovdqu [esi + edi], ymm0
+ lea esi, [esi + 32]
+ sub ecx, 32
+ jg xloop
+ jmp xloop99
+
+ // Blend 25 / 75.
+ xloop25:
+ vmovdqu ymm0, [esi]
+ vmovdqu ymm1, [esi + edx]
+ vpavgb ymm0, ymm0, ymm1
+ vpavgb ymm0, ymm0, ymm1
+ vmovdqu [esi + edi], ymm0
+ lea esi, [esi + 32]
+ sub ecx, 32
+ jg xloop25
+ jmp xloop99
+
+ // Blend 50 / 50.
+ xloop50:
+ vmovdqu ymm0, [esi]
+ vpavgb ymm0, ymm0, [esi + edx]
+ vmovdqu [esi + edi], ymm0
+ lea esi, [esi + 32]
+ sub ecx, 32
+ jg xloop50
+ jmp xloop99
+
+ // Blend 75 / 25.
+ xloop75:
+ vmovdqu ymm1, [esi]
+ vmovdqu ymm0, [esi + edx]
+ vpavgb ymm0, ymm0, ymm1
+ vpavgb ymm0, ymm0, ymm1
+ vmovdqu [esi + edi], ymm0
+ lea esi, [esi + 32]
+ sub ecx, 32
+ jg xloop75
+ jmp xloop99
+
+ // Blend 100 / 0 - Copy row unchanged.
+ xloop100:
+ rep movsb
+
+ xloop99:
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_INTERPOLATEROW_AVX2
+
+// Bilinear filter 16x2 -> 16x1
+__declspec(naked)
+void InterpolateRow_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) {
+ __asm {
+ push esi
+ push edi
+ mov edi, [esp + 8 + 4] // dst_ptr
+ mov esi, [esp + 8 + 8] // src_ptr
+ mov edx, [esp + 8 + 12] // src_stride
+ mov ecx, [esp + 8 + 16] // dst_width
+ mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
+ sub edi, esi
+ shr eax, 1
+ // Dispatch to specialized filters if applicable.
+ cmp eax, 0
+ je xloop100 // 0 / 128. Blend 100 / 0.
+ cmp eax, 32
+ je xloop75 // 32 / 128 is 0.25. Blend 75 / 25.
+ cmp eax, 64
+ je xloop50 // 64 / 128 is 0.50. Blend 50 / 50.
+ cmp eax, 96
+ je xloop25 // 96 / 128 is 0.75. Blend 25 / 75.
+
+ movd xmm0, eax // high fraction 0..127
+ neg eax
+ add eax, 128
+ movd xmm5, eax // low fraction 128..1
+ punpcklbw xmm5, xmm0
+ punpcklwd xmm5, xmm5
+ pshufd xmm5, xmm5, 0
+
+ xloop:
+ movdqu xmm0, [esi]
+ movdqu xmm2, [esi + edx]
+ movdqu xmm1, xmm0
+ punpcklbw xmm0, xmm2
+ punpckhbw xmm1, xmm2
+ pmaddubsw xmm0, xmm5
+ pmaddubsw xmm1, xmm5
+ psrlw xmm0, 7
+ psrlw xmm1, 7
+ packuswb xmm0, xmm1
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop
+ jmp xloop99
+
+ // Blend 25 / 75.
+ xloop25:
+ movdqu xmm0, [esi]
+ movdqu xmm1, [esi + edx]
+ pavgb xmm0, xmm1
+ pavgb xmm0, xmm1
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop25
+ jmp xloop99
+
+ // Blend 50 / 50.
+ xloop50:
+ movdqu xmm0, [esi]
+ movdqu xmm1, [esi + edx]
+ pavgb xmm0, xmm1
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop50
+ jmp xloop99
+
+ // Blend 75 / 25.
+ xloop75:
+ movdqu xmm1, [esi]
+ movdqu xmm0, [esi + edx]
+ pavgb xmm0, xmm1
+ pavgb xmm0, xmm1
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop75
+ jmp xloop99
+
+ // Blend 100 / 0 - Copy row unchanged.
+ xloop100:
+ movdqu xmm0, [esi]
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop100
+
+ xloop99:
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+#ifdef HAS_INTERPOLATEROW_SSE2
+// Bilinear filter 16x2 -> 16x1
+__declspec(naked)
+void InterpolateRow_SSE2(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width,
+ int source_y_fraction) {
+ __asm {
+ push esi
+ push edi
+ mov edi, [esp + 8 + 4] // dst_ptr
+ mov esi, [esp + 8 + 8] // src_ptr
+ mov edx, [esp + 8 + 12] // src_stride
+ mov ecx, [esp + 8 + 16] // dst_width
+ mov eax, [esp + 8 + 20] // source_y_fraction (0..255)
+ sub edi, esi
+ // Dispatch to specialized filters if applicable.
+ cmp eax, 0
+ je xloop100 // 0 / 256. Blend 100 / 0.
+ cmp eax, 64
+ je xloop75 // 64 / 256 is 0.25. Blend 75 / 25.
+ cmp eax, 128
+ je xloop50 // 128 / 256 is 0.50. Blend 50 / 50.
+ cmp eax, 192
+ je xloop25 // 192 / 256 is 0.75. Blend 25 / 75.
+
+ movd xmm5, eax // xmm5 = y fraction
+ punpcklbw xmm5, xmm5
+ psrlw xmm5, 1
+ punpcklwd xmm5, xmm5
+ punpckldq xmm5, xmm5
+ punpcklqdq xmm5, xmm5
+ pxor xmm4, xmm4
+
+ xloop:
+ movdqu xmm0, [esi] // row0
+ movdqu xmm2, [esi + edx] // row1
+ movdqu xmm1, xmm0
+ movdqu xmm3, xmm2
+ punpcklbw xmm2, xmm4
+ punpckhbw xmm3, xmm4
+ punpcklbw xmm0, xmm4
+ punpckhbw xmm1, xmm4
+ psubw xmm2, xmm0 // row1 - row0
+ psubw xmm3, xmm1
+ paddw xmm2, xmm2 // 9 bits * 15 bits = 8.16
+ paddw xmm3, xmm3
+ pmulhw xmm2, xmm5 // scale diff
+ pmulhw xmm3, xmm5
+ paddw xmm0, xmm2 // sum rows
+ paddw xmm1, xmm3
+ packuswb xmm0, xmm1
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop
+ jmp xloop99
+
+ // Blend 25 / 75.
+ xloop25:
+ movdqu xmm0, [esi]
+ movdqu xmm1, [esi + edx]
+ pavgb xmm0, xmm1
+ pavgb xmm0, xmm1
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop25
+ jmp xloop99
+
+ // Blend 50 / 50.
+ xloop50:
+ movdqu xmm0, [esi]
+ movdqu xmm1, [esi + edx]
+ pavgb xmm0, xmm1
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop50
+ jmp xloop99
+
+ // Blend 75 / 25.
+ xloop75:
+ movdqu xmm1, [esi]
+ movdqu xmm0, [esi + edx]
+ pavgb xmm0, xmm1
+ pavgb xmm0, xmm1
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop75
+ jmp xloop99
+
+ // Blend 100 / 0 - Copy row unchanged.
+ xloop100:
+ movdqu xmm0, [esi]
+ movdqu [esi + edi], xmm0
+ lea esi, [esi + 16]
+ sub ecx, 16
+ jg xloop100
+
+ xloop99:
+ pop edi
+ pop esi
+ ret
+ }
+}
+#endif // HAS_INTERPOLATEROW_SSE2
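+
+// Editor's note: a scalar sketch of the bilinear row filter above (not
+// upstream code), using the same halved fraction (0..128) as the
+// SSSE3/AVX2 paths; the 0, 25, 50, 75 and 100 percent shortcuts are just
+// special cases of this blend.
+static void InterpolateRow_Ref(uint8* dst_ptr, const uint8* src_ptr,
+                               ptrdiff_t src_stride, int dst_width,
+                               int source_y_fraction) {
+  int y1 = source_y_fraction >> 1;  // 0..128
+  int y0 = 128 - y1;
+  int x;
+  for (x = 0; x < dst_width; ++x) {
+    dst_ptr[x] = (uint8)((src_ptr[x] * y0 + src_ptr[x + src_stride] * y1) >> 7);
+  }
+}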
+
+// For BGRAToARGB, ABGRToARGB, RGBAToARGB, and ARGBToRGBA.
+__declspec(naked)
+void ARGBShuffleRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // shuffler
+ movdqu xmm5, [ecx]
+ mov ecx, [esp + 16] // pix
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ pshufb xmm0, xmm5
+ pshufb xmm1, xmm5
+ movdqu [edx], xmm0
+ movdqu [edx + 16], xmm1
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg wloop
+ ret
+ }
+}
+
+#ifdef HAS_ARGBSHUFFLEROW_AVX2
+__declspec(naked)
+void ARGBShuffleRow_AVX2(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ mov edx, [esp + 8] // dst_argb
+ mov ecx, [esp + 12] // shuffler
+ vbroadcastf128 ymm5, [ecx] // same shuffle in high as low.
+ mov ecx, [esp + 16] // pix
+
+ wloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vpshufb ymm0, ymm0, ymm5
+ vpshufb ymm1, ymm1, ymm5
+ vmovdqu [edx], ymm0
+ vmovdqu [edx + 32], ymm1
+ lea edx, [edx + 64]
+ sub ecx, 16
+ jg wloop
+
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBSHUFFLEROW_AVX2
+
+__declspec(naked)
+void ARGBShuffleRow_SSE2(const uint8* src_argb, uint8* dst_argb,
+ const uint8* shuffler, int pix) {
+ __asm {
+ push ebx
+ push esi
+ mov eax, [esp + 8 + 4] // src_argb
+ mov edx, [esp + 8 + 8] // dst_argb
+ mov esi, [esp + 8 + 12] // shuffler
+ mov ecx, [esp + 8 + 16] // pix
+ pxor xmm5, xmm5
+
+ mov ebx, [esi] // shuffler
+ cmp ebx, 0x03000102
+ je shuf_3012
+ cmp ebx, 0x00010203
+ je shuf_0123
+ cmp ebx, 0x00030201
+ je shuf_0321
+ cmp ebx, 0x02010003
+ je shuf_2103
+
+ // TODO(fbarchard): Use one source pointer and 3 offsets.
+ shuf_any1:
+ movzx ebx, byte ptr [esi]
+ movzx ebx, byte ptr [eax + ebx]
+ mov [edx], bl
+ movzx ebx, byte ptr [esi + 1]
+ movzx ebx, byte ptr [eax + ebx]
+ mov [edx + 1], bl
+ movzx ebx, byte ptr [esi + 2]
+ movzx ebx, byte ptr [eax + ebx]
+ mov [edx + 2], bl
+ movzx ebx, byte ptr [esi + 3]
+ movzx ebx, byte ptr [eax + ebx]
+ mov [edx + 3], bl
+ lea eax, [eax + 4]
+ lea edx, [edx + 4]
+ sub ecx, 1
+ jg shuf_any1
+ jmp shuf99
+
+ shuf_0123:
+ movdqu xmm0, [eax]
+ lea eax, [eax + 16]
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm5
+ punpckhbw xmm1, xmm5
+ pshufhw xmm0, xmm0, 01Bh // 1B = 00011011 = 0x0123 = BGRAToARGB
+ pshuflw xmm0, xmm0, 01Bh
+ pshufhw xmm1, xmm1, 01Bh
+ pshuflw xmm1, xmm1, 01Bh
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg shuf_0123
+ jmp shuf99
+
+ shuf_0321:
+ movdqu xmm0, [eax]
+ lea eax, [eax + 16]
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm5
+ punpckhbw xmm1, xmm5
+ pshufhw xmm0, xmm0, 039h // 39 = 00111001 = 0x0321 = RGBAToARGB
+ pshuflw xmm0, xmm0, 039h
+ pshufhw xmm1, xmm1, 039h
+ pshuflw xmm1, xmm1, 039h
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg shuf_0321
+ jmp shuf99
+
+ shuf_2103:
+ movdqu xmm0, [eax]
+ lea eax, [eax + 16]
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm5
+ punpckhbw xmm1, xmm5
+ pshufhw xmm0, xmm0, 093h // 93 = 10010011 = 0x2103 = ARGBToRGBA
+ pshuflw xmm0, xmm0, 093h
+ pshufhw xmm1, xmm1, 093h
+ pshuflw xmm1, xmm1, 093h
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg shuf_2103
+ jmp shuf99
+
+ shuf_3012:
+ movdqu xmm0, [eax]
+ lea eax, [eax + 16]
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm5
+ punpckhbw xmm1, xmm5
+ pshufhw xmm0, xmm0, 0C6h // C6 = 11000110 = 0x3012 = ABGRToARGB
+ pshuflw xmm0, xmm0, 0C6h
+ pshufhw xmm1, xmm1, 0C6h
+ pshuflw xmm1, xmm1, 0C6h
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg shuf_3012
+
+ shuf99:
+ pop esi
+ pop ebx
+ ret
+ }
+}
+
+// YUY2 - Macro-pixel = 2 image pixels
+// Y0U0Y1V0....Y2U2Y3V2...Y4U4Y5V4....
+
+// UYVY - Macro-pixel = 2 image pixels
+// U0Y0V0Y1....U2Y2V2Y3...U4Y4V4Y5....
+
+__declspec(naked)
+void I422ToYUY2Row_SSE2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_frame, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_y
+ mov esi, [esp + 8 + 8] // src_u
+ mov edx, [esp + 8 + 12] // src_v
+ mov edi, [esp + 8 + 16] // dst_frame
+ mov ecx, [esp + 8 + 20] // width
+ sub edx, esi
+
+ convertloop:
+ movq xmm2, qword ptr [esi] // U
+ movq xmm3, qword ptr [esi + edx] // V
+ lea esi, [esi + 8]
+ punpcklbw xmm2, xmm3 // UV
+ movdqu xmm0, [eax] // Y
+ lea eax, [eax + 16]
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm2 // YUYV
+ punpckhbw xmm1, xmm2
+ movdqu [edi], xmm0
+ movdqu [edi + 16], xmm1
+ lea edi, [edi + 32]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+__declspec(naked)
+void I422ToUYVYRow_SSE2(const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_frame, int width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_y
+ mov esi, [esp + 8 + 8] // src_u
+ mov edx, [esp + 8 + 12] // src_v
+ mov edi, [esp + 8 + 16] // dst_frame
+ mov ecx, [esp + 8 + 20] // width
+ sub edx, esi
+
+ convertloop:
+ movq xmm2, qword ptr [esi] // U
+ movq xmm3, qword ptr [esi + edx] // V
+ lea esi, [esi + 8]
+ punpcklbw xmm2, xmm3 // UV
+ movdqu xmm0, [eax] // Y
+ movdqa xmm1, xmm2
+ lea eax, [eax + 16]
+ punpcklbw xmm1, xmm0 // UYVY
+ punpckhbw xmm2, xmm0
+ movdqu [edi], xmm1
+ movdqu [edi + 16], xmm2
+ lea edi, [edi + 32]
+ sub ecx, 16
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
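+
+// Editor's note: a scalar sketch of the packing above (not upstream code):
+// two Y samples share one U and one V sample per macro-pixel.
+static void I422ToYUY2MacroPixel_Ref(const uint8* y, const uint8* u,
+                                     const uint8* v, uint8* yuy2) {
+  yuy2[0] = y[0];  // Y0
+  yuy2[1] = u[0];  // U
+  yuy2[2] = y[1];  // Y1
+  yuy2[3] = v[0];  // V  (UYVY swaps this to U Y0 V Y1)
+}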
+
+#ifdef HAS_ARGBPOLYNOMIALROW_SSE2
+__declspec(naked)
+void ARGBPolynomialRow_SSE2(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] /* src_argb */
+ mov edx, [esp + 4 + 8] /* dst_argb */
+ mov esi, [esp + 4 + 12] /* poly */
+ mov ecx, [esp + 4 + 16] /* width */
+ pxor xmm3, xmm3 // 0 constant for zero extending bytes to ints.
+
+ // 2 pixel loop.
+ convertloop:
+// pmovzxbd xmm0, dword ptr [eax] // BGRA pixel
+// pmovzxbd xmm4, dword ptr [eax + 4] // BGRA pixel
+ movq xmm0, qword ptr [eax] // BGRABGRA
+ lea eax, [eax + 8]
+ punpcklbw xmm0, xmm3
+ movdqa xmm4, xmm0
+ punpcklwd xmm0, xmm3 // pixel 0
+ punpckhwd xmm4, xmm3 // pixel 1
+ cvtdq2ps xmm0, xmm0 // 4 floats
+ cvtdq2ps xmm4, xmm4
+ movdqa xmm1, xmm0 // X
+ movdqa xmm5, xmm4
+ mulps xmm0, [esi + 16] // C1 * X
+ mulps xmm4, [esi + 16]
+ addps xmm0, [esi] // result = C0 + C1 * X
+ addps xmm4, [esi]
+ movdqa xmm2, xmm1
+ movdqa xmm6, xmm5
+ mulps xmm2, xmm1 // X * X
+ mulps xmm6, xmm5
+ mulps xmm1, xmm2 // X * X * X
+ mulps xmm5, xmm6
+ mulps xmm2, [esi + 32] // C2 * X * X
+ mulps xmm6, [esi + 32]
+ mulps xmm1, [esi + 48] // C3 * X * X * X
+ mulps xmm5, [esi + 48]
+ addps xmm0, xmm2 // result += C2 * X * X
+ addps xmm4, xmm6
+ addps xmm0, xmm1 // result += C3 * X * X * X
+ addps xmm4, xmm5
+ cvttps2dq xmm0, xmm0
+ cvttps2dq xmm4, xmm4
+ packuswb xmm0, xmm4
+ packuswb xmm0, xmm0
+ movq qword ptr [edx], xmm0
+ lea edx, [edx + 8]
+ sub ecx, 2
+ jg convertloop
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBPOLYNOMIALROW_SSE2
+
+#ifdef HAS_ARGBPOLYNOMIALROW_AVX2
+__declspec(naked)
+void ARGBPolynomialRow_AVX2(const uint8* src_argb,
+ uint8* dst_argb, const float* poly,
+ int width) {
+ __asm {
+ mov eax, [esp + 4] /* src_argb */
+ mov edx, [esp + 8] /* dst_argb */
+ mov ecx, [esp + 12] /* poly */
+ vbroadcastf128 ymm4, [ecx] // C0
+ vbroadcastf128 ymm5, [ecx + 16] // C1
+ vbroadcastf128 ymm6, [ecx + 32] // C2
+ vbroadcastf128 ymm7, [ecx + 48] // C3
+ mov ecx, [esp + 16] /* width */
+
+ // 2 pixel loop.
+ convertloop:
+ vpmovzxbd ymm0, qword ptr [eax] // 2 BGRA pixels
+ lea eax, [eax + 8]
+ vcvtdq2ps ymm0, ymm0 // X 8 floats
+ vmulps ymm2, ymm0, ymm0 // X * X
+ vmulps ymm3, ymm0, ymm7 // C3 * X
+ vfmadd132ps ymm0, ymm4, ymm5 // result = C0 + C1 * X
+ vfmadd231ps ymm0, ymm2, ymm6 // result += C2 * X * X
+ vfmadd231ps ymm0, ymm2, ymm3 // result += C3 * X * X * X
+ vcvttps2dq ymm0, ymm0
+ vpackusdw ymm0, ymm0, ymm0 // b0g0r0a0_00000000_b0g0r0a0_00000000
+ vpermq ymm0, ymm0, 0xd8 // b0g0r0a0_b0g0r0a0_00000000_00000000
+ vpackuswb xmm0, xmm0, xmm0 // bgrabgra_00000000_00000000_00000000
+ vmovq qword ptr [edx], xmm0
+ lea edx, [edx + 8]
+ sub ecx, 2
+ jg convertloop
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_ARGBPOLYNOMIALROW_AVX2
+
+#ifdef HAS_ARGBCOLORTABLEROW_X86
+// Transform ARGB pixels with color table.
+__declspec(naked)
+void ARGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb,
+ int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] /* dst_argb */
+ mov esi, [esp + 4 + 8] /* table_argb */
+ mov ecx, [esp + 4 + 12] /* width */
+
+ // 1 pixel loop.
+ convertloop:
+ movzx edx, byte ptr [eax]
+ lea eax, [eax + 4]
+ movzx edx, byte ptr [esi + edx * 4]
+ mov byte ptr [eax - 4], dl
+ movzx edx, byte ptr [eax - 4 + 1]
+ movzx edx, byte ptr [esi + edx * 4 + 1]
+ mov byte ptr [eax - 4 + 1], dl
+ movzx edx, byte ptr [eax - 4 + 2]
+ movzx edx, byte ptr [esi + edx * 4 + 2]
+ mov byte ptr [eax - 4 + 2], dl
+ movzx edx, byte ptr [eax - 4 + 3]
+ movzx edx, byte ptr [esi + edx * 4 + 3]
+ mov byte ptr [eax - 4 + 3], dl
+ dec ecx
+ jg convertloop
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBCOLORTABLEROW_X86
+
+#ifdef HAS_RGBCOLORTABLEROW_X86
+// Transform RGB pixels with color table.
+__declspec(naked)
+void RGBColorTableRow_X86(uint8* dst_argb, const uint8* table_argb, int width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] /* dst_argb */
+ mov esi, [esp + 4 + 8] /* table_argb */
+ mov ecx, [esp + 4 + 12] /* width */
+
+ // 1 pixel loop.
+ convertloop:
+ movzx edx, byte ptr [eax]
+ lea eax, [eax + 4]
+ movzx edx, byte ptr [esi + edx * 4]
+ mov byte ptr [eax - 4], dl
+ movzx edx, byte ptr [eax - 4 + 1]
+ movzx edx, byte ptr [esi + edx * 4 + 1]
+ mov byte ptr [eax - 4 + 1], dl
+ movzx edx, byte ptr [eax - 4 + 2]
+ movzx edx, byte ptr [esi + edx * 4 + 2]
+ mov byte ptr [eax - 4 + 2], dl
+ dec ecx
+ jg convertloop
+
+ pop esi
+ ret
+ }
+}
+#endif // HAS_RGBCOLORTABLEROW_X86
+
+#ifdef HAS_ARGBLUMACOLORTABLEROW_SSSE3
+// Transform RGB pixels with luma table.
+__declspec(naked)
+void ARGBLumaColorTableRow_SSSE3(const uint8* src_argb, uint8* dst_argb,
+ int width,
+ const uint8* luma, uint32 lumacoeff) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] /* src_argb */
+ mov edi, [esp + 8 + 8] /* dst_argb */
+ mov ecx, [esp + 8 + 12] /* width */
+ movd xmm2, dword ptr [esp + 8 + 16] // luma table
+ movd xmm3, dword ptr [esp + 8 + 20] // lumacoeff
+ pshufd xmm2, xmm2, 0
+ pshufd xmm3, xmm3, 0
+ pcmpeqb xmm4, xmm4 // generate mask 0xff00ff00
+ psllw xmm4, 8
+ pxor xmm5, xmm5
+
+ // 4 pixel loop.
+ convertloop:
+ movdqu xmm0, xmmword ptr [eax] // 4 ARGB pixels, used to compute luma ptrs
+ pmaddubsw xmm0, xmm3
+ phaddw xmm0, xmm0
+ pand xmm0, xmm4 // mask out low bits
+ punpcklwd xmm0, xmm5
+ paddd xmm0, xmm2 // add table base
+ movd esi, xmm0
+ pshufd xmm0, xmm0, 0x39 // 00111001 to rotate right 32
+
+ movzx edx, byte ptr [eax]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi], dl
+ movzx edx, byte ptr [eax + 1]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 1], dl
+ movzx edx, byte ptr [eax + 2]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 2], dl
+ movzx edx, byte ptr [eax + 3] // copy alpha.
+ mov byte ptr [edi + 3], dl
+
+ movd esi, xmm0
+ pshufd xmm0, xmm0, 0x39 // 00111001 to rotate right 32
+
+ movzx edx, byte ptr [eax + 4]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 4], dl
+ movzx edx, byte ptr [eax + 5]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 5], dl
+ movzx edx, byte ptr [eax + 6]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 6], dl
+ movzx edx, byte ptr [eax + 7] // copy alpha.
+ mov byte ptr [edi + 7], dl
+
+ movd esi, xmm0
+ pshufd xmm0, xmm0, 0x39 // 00111001 to rotate right 32
+
+ movzx edx, byte ptr [eax + 8]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 8], dl
+ movzx edx, byte ptr [eax + 9]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 9], dl
+ movzx edx, byte ptr [eax + 10]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 10], dl
+ movzx edx, byte ptr [eax + 11] // copy alpha.
+ mov byte ptr [edi + 11], dl
+
+ movd esi, xmm0
+
+ movzx edx, byte ptr [eax + 12]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 12], dl
+ movzx edx, byte ptr [eax + 13]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 13], dl
+ movzx edx, byte ptr [eax + 14]
+ movzx edx, byte ptr [esi + edx]
+ mov byte ptr [edi + 14], dl
+ movzx edx, byte ptr [eax + 15] // copy alpha.
+ mov byte ptr [edi + 15], dl
+
+ lea eax, [eax + 16]
+ lea edi, [edi + 16]
+ sub ecx, 4
+ jg convertloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+#endif // HAS_ARGBLUMACOLORTABLEROW_SSSE3
+
+#endif // defined(_M_X64)
+#endif // !defined(LIBYUV_DISABLE_X86) && (defined(_M_IX86) || defined(_M_X64))
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/row_x86.asm b/media/libaom/src/third_party/libyuv/source/row_x86.asm
new file mode 100644
index 000000000..0cb326f8e
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/row_x86.asm
@@ -0,0 +1,146 @@
+;
+; Copyright 2012 The LibYuv Project Authors. All rights reserved.
+;
+; Use of this source code is governed by a BSD-style license
+; that can be found in the LICENSE file in the root of the source
+; tree. An additional intellectual property rights grant can be found
+; in the file PATENTS. All contributing project authors may
+; be found in the AUTHORS file in the root of the source tree.
+;
+
+%ifdef __YASM_VERSION_ID__
+%if __YASM_VERSION_ID__ < 01020000h
+%error AVX2 is supported only by yasm 1.2.0 or later.
+%endif
+%endif
+%include "x86inc.asm"
+
+SECTION .text
+
+; cglobal numeric arguments: parameter count, GPR count, vector register count
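+; e.g. "3, 3, 3" below declares 3 parameters, 3 GPRs and 3 vector registers.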
+
+; void YUY2ToYRow_SSE2(const uint8* src_yuy2, uint8* dst_y, int pix)
+
+%macro YUY2TOYROW 2-3
+cglobal %1ToYRow%3, 3, 3, 3, src_yuy2, dst_y, pix
+%ifidn %1,YUY2
+ pcmpeqb m2, m2, m2 ; generate mask 0x00ff00ff
+ psrlw m2, m2, 8
+%endif
+
+ ALIGN 4
+.convertloop:
+ mov%2 m0, [src_yuy2q]
+ mov%2 m1, [src_yuy2q + mmsize]
+ lea src_yuy2q, [src_yuy2q + mmsize * 2]
+%ifidn %1,YUY2
+ pand m0, m0, m2 ; YUY2 even bytes are Y
+ pand m1, m1, m2
+%else
+ psrlw m0, m0, 8 ; UYVY odd bytes are Y
+ psrlw m1, m1, 8
+%endif
+ packuswb m0, m0, m1
+%if cpuflag(AVX2)
+ vpermq m0, m0, 0xd8
+%endif
+ sub pixd, mmsize
+ mov%2 [dst_yq], m0
+ lea dst_yq, [dst_yq + mmsize]
+ jg .convertloop
+ REP_RET
+%endmacro
+
+; TODO(fbarchard): Remove MMX. Add SSSE3 pshufb version.
+INIT_MMX MMX
+YUY2TOYROW YUY2,a,
+YUY2TOYROW YUY2,u,_Unaligned
+YUY2TOYROW UYVY,a,
+YUY2TOYROW UYVY,u,_Unaligned
+INIT_XMM SSE2
+YUY2TOYROW YUY2,a,
+YUY2TOYROW YUY2,u,_Unaligned
+YUY2TOYROW UYVY,a,
+YUY2TOYROW UYVY,u,_Unaligned
+INIT_YMM AVX2
+YUY2TOYROW YUY2,a,
+YUY2TOYROW UYVY,a,
+
+; void SplitUVRow_SSE2(const uint8* src_uv, uint8* dst_u, uint8* dst_v, int pix)
+
+%macro SplitUVRow 1-2
+cglobal SplitUVRow%2, 4, 4, 5, src_uv, dst_u, dst_v, pix
+ pcmpeqb m4, m4, m4 ; generate mask 0x00ff00ff
+ psrlw m4, m4, 8
+ sub dst_vq, dst_uq
+
+ ALIGN 4
+.convertloop:
+ mov%1 m0, [src_uvq]
+ mov%1 m1, [src_uvq + mmsize]
+ lea src_uvq, [src_uvq + mmsize * 2]
+ psrlw m2, m0, 8 ; odd bytes
+ psrlw m3, m1, 8
+ pand m0, m0, m4 ; even bytes
+ pand m1, m1, m4
+ packuswb m0, m0, m1
+ packuswb m2, m2, m3
+%if cpuflag(AVX2)
+ vpermq m0, m0, 0xd8
+ vpermq m2, m2, 0xd8
+%endif
+ mov%1 [dst_uq], m0
+ mov%1 [dst_uq + dst_vq], m2
+ lea dst_uq, [dst_uq + mmsize]
+ sub pixd, mmsize
+ jg .convertloop
+ REP_RET
+%endmacro
+
+INIT_MMX MMX
+SplitUVRow a,
+SplitUVRow u,_Unaligned
+INIT_XMM SSE2
+SplitUVRow a,
+SplitUVRow u,_Unaligned
+INIT_YMM AVX2
+SplitUVRow a,
+
+; void MergeUVRow_SSE2(const uint8* src_u, const uint8* src_v, uint8* dst_uv,
+; int width);
+
+%macro MergeUVRow_ 1-2
+cglobal MergeUVRow_%2, 4, 4, 3, src_u, src_v, dst_uv, pix
+ sub src_vq, src_uq
+
+ ALIGN 4
+.convertloop:
+ mov%1 m0, [src_uq]
+ mov%1 m1, [src_uq + src_vq] ; src_vq holds src_v - src_u after the sub above
+ lea src_uq, [src_uq + mmsize]
+ punpcklbw m2, m0, m1 ; first 8 UV pairs
+ punpckhbw m0, m0, m1 ; next 8 UV pairs
+%if cpuflag(AVX2)
+ vperm2i128 m1, m2, m0, 0x20 ; low 128 of ymm2 and low 128 of ymm0
+ vperm2i128 m2, m2, m0, 0x31 ; high 128 of ymm2 and high 128 of ymm0
+ mov%1 [dst_uvq], m1
+ mov%1 [dst_uvq + mmsize], m2
+%else
+ mov%1 [dst_uvq], m2
+ mov%1 [dst_uvq + mmsize], m0
+%endif
+ lea dst_uvq, [dst_uvq + mmsize * 2]
+ sub pixd, mmsize
+ jg .convertloop
+ REP_RET
+%endmacro
+
+INIT_MMX MMX
+MergeUVRow_ a,
+MergeUVRow_ u,_Unaligned
+INIT_XMM SSE2
+MergeUVRow_ a,
+MergeUVRow_ u,_Unaligned
+INIT_YMM AVX2
+MergeUVRow_ a,
+
diff --git a/media/libaom/src/third_party/libyuv/source/scale.cc b/media/libaom/src/third_party/libyuv/source/scale.cc
new file mode 100644
index 000000000..0a01304c4
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale.cc
@@ -0,0 +1,1689 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/scale.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "libyuv/cpu_id.h"
+#include "libyuv/planar_functions.h" // For CopyPlane
+#include "libyuv/row.h"
+#include "libyuv/scale_row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+static __inline int Abs(int v) {
+ return v >= 0 ? v : -v;
+}
+
+#define SUBSAMPLE(v, a, s) (((v) < 0) ? (-((-(v) + (a)) >> (s))) : (((v) + (a)) >> (s)))
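+// e.g. SUBSAMPLE(1080, 1, 1) = (1080 + 1) >> 1 = 540 and
+// SUBSAMPLE(1081, 1, 1) = 541: halved plane sizes round up for odd inputs.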
+
+// Scale plane, 1/2
+// This is an optimized version for scaling down a plane to 1/2 of
+// its original size.
+
+static void ScalePlaneDown2(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_ptr, uint8* dst_ptr,
+ enum FilterMode filtering) {
+ int y;
+ void (*ScaleRowDown2)(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) =
+ filtering == kFilterNone ? ScaleRowDown2_C :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_C : ScaleRowDown2Box_C);
+ int row_stride = src_stride << 1;
+ if (!filtering) {
+ src_ptr += src_stride; // Point to odd rows.
+ src_stride = 0;
+ }
+
+#if defined(HAS_SCALEROWDOWN2_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_Any_NEON :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_Any_NEON :
+ ScaleRowDown2Box_Any_NEON);
+ if (IS_ALIGNED(dst_width, 16)) {
+ ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_NEON :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_NEON :
+ ScaleRowDown2Box_NEON);
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN2_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_Any_SSE2 :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_Any_SSE2 :
+ ScaleRowDown2Box_Any_SSE2);
+ if (IS_ALIGNED(dst_width, 16)) {
+ ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_SSE2 :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_SSE2 :
+ ScaleRowDown2Box_SSE2);
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN2_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_Any_AVX2 :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_Any_AVX2 :
+ ScaleRowDown2Box_Any_AVX2);
+ if (IS_ALIGNED(dst_width, 32)) {
+ ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_AVX2 :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_AVX2 :
+ ScaleRowDown2Box_AVX2);
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN2_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_ptr, 4) &&
+ IS_ALIGNED(src_stride, 4) && IS_ALIGNED(row_stride, 4) &&
+ IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
+ ScaleRowDown2 = filtering ?
+ ScaleRowDown2Box_MIPS_DSPR2 : ScaleRowDown2_MIPS_DSPR2;
+ }
+#endif
+
+ if (filtering == kFilterLinear) {
+ src_stride = 0;
+ }
+ // TODO(fbarchard): Loop through source height to allow odd height.
+ for (y = 0; y < dst_height; ++y) {
+ ScaleRowDown2(src_ptr, src_stride, dst_ptr, dst_width);
+ src_ptr += row_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+static void ScalePlaneDown2_16(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_ptr, uint16* dst_ptr,
+ enum FilterMode filtering) {
+ int y;
+ void (*ScaleRowDown2)(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width) =
+ filtering == kFilterNone ? ScaleRowDown2_16_C :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_16_C :
+ ScaleRowDown2Box_16_C);
+ int row_stride = src_stride << 1;
+ if (!filtering) {
+ src_ptr += src_stride; // Point to odd rows.
+ src_stride = 0;
+ }
+
+#if defined(HAS_SCALEROWDOWN2_16_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(dst_width, 16)) {
+ ScaleRowDown2 = filtering ? ScaleRowDown2Box_16_NEON :
+ ScaleRowDown2_16_NEON;
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN2_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 16)) {
+ ScaleRowDown2 = filtering == kFilterNone ? ScaleRowDown2_16_SSE2 :
+ (filtering == kFilterLinear ? ScaleRowDown2Linear_16_SSE2 :
+ ScaleRowDown2Box_16_SSE2);
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN2_16_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_ptr, 4) &&
+ IS_ALIGNED(src_stride, 4) && IS_ALIGNED(row_stride, 4) &&
+ IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
+ ScaleRowDown2 = filtering ?
+ ScaleRowDown2Box_16_MIPS_DSPR2 : ScaleRowDown2_16_MIPS_DSPR2;
+ }
+#endif
+
+ if (filtering == kFilterLinear) {
+ src_stride = 0;
+ }
+ // TODO(fbarchard): Loop through source height to allow odd height.
+ for (y = 0; y < dst_height; ++y) {
+ ScaleRowDown2(src_ptr, src_stride, dst_ptr, dst_width);
+ src_ptr += row_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+// Scale plane, 1/4
+// This is an optimized version for scaling down a plane to 1/4 of
+// its original size.
+
+static void ScalePlaneDown4(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_ptr, uint8* dst_ptr,
+ enum FilterMode filtering) {
+ int y;
+ void (*ScaleRowDown4)(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) =
+ filtering ? ScaleRowDown4Box_C : ScaleRowDown4_C;
+ int row_stride = src_stride << 2;
+ if (!filtering) {
+ src_ptr += src_stride * 2; // Point to row 2.
+ src_stride = 0;
+ }
+#if defined(HAS_SCALEROWDOWN4_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ScaleRowDown4 = filtering ?
+ ScaleRowDown4Box_Any_NEON : ScaleRowDown4_Any_NEON;
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleRowDown4 = filtering ? ScaleRowDown4Box_NEON : ScaleRowDown4_NEON;
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN4_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ScaleRowDown4 = filtering ?
+ ScaleRowDown4Box_Any_SSE2 : ScaleRowDown4_Any_SSE2;
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleRowDown4 = filtering ? ScaleRowDown4Box_SSE2 : ScaleRowDown4_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN4_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ScaleRowDown4 = filtering ?
+ ScaleRowDown4Box_Any_AVX2 : ScaleRowDown4_Any_AVX2;
+ if (IS_ALIGNED(dst_width, 16)) {
+ ScaleRowDown4 = filtering ? ScaleRowDown4Box_AVX2 : ScaleRowDown4_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN4_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(row_stride, 4) &&
+ IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
+ ScaleRowDown4 = filtering ?
+ ScaleRowDown4Box_MIPS_DSPR2 : ScaleRowDown4_MIPS_DSPR2;
+ }
+#endif
+
+ if (filtering == kFilterLinear) {
+ src_stride = 0;
+ }
+ for (y = 0; y < dst_height; ++y) {
+ ScaleRowDown4(src_ptr, src_stride, dst_ptr, dst_width);
+ src_ptr += row_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+static void ScalePlaneDown4_16(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_ptr, uint16* dst_ptr,
+ enum FilterMode filtering) {
+ int y;
+ void (*ScaleRowDown4)(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width) =
+ filtering ? ScaleRowDown4Box_16_C : ScaleRowDown4_16_C;
+ int row_stride = src_stride << 2;
+ if (!filtering) {
+ src_ptr += src_stride * 2; // Point to row 2.
+ src_stride = 0;
+ }
+#if defined(HAS_SCALEROWDOWN4_16_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && IS_ALIGNED(dst_width, 8)) {
+ ScaleRowDown4 = filtering ? ScaleRowDown4Box_16_NEON :
+ ScaleRowDown4_16_NEON;
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN4_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
+ ScaleRowDown4 = filtering ? ScaleRowDown4Box_16_SSE2 :
+ ScaleRowDown4_16_SSE2;
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN4_16_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(row_stride, 4) &&
+ IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
+ ScaleRowDown4 = filtering ?
+ ScaleRowDown4Box_16_MIPS_DSPR2 : ScaleRowDown4_16_MIPS_DSPR2;
+ }
+#endif
+
+ if (filtering == kFilterLinear) {
+ src_stride = 0;
+ }
+ for (y = 0; y < dst_height; ++y) {
+ ScaleRowDown4(src_ptr, src_stride, dst_ptr, dst_width);
+ src_ptr += row_stride;
+ dst_ptr += dst_stride;
+ }
+}
+
+// Scale plane down, 3/4
+
+static void ScalePlaneDown34(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_ptr, uint8* dst_ptr,
+ enum FilterMode filtering) {
+ int y;
+ void (*ScaleRowDown34_0)(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+ void (*ScaleRowDown34_1)(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+ const int filter_stride = (filtering == kFilterLinear) ? 0 : src_stride;
+ assert(dst_width % 3 == 0);
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_C;
+ ScaleRowDown34_1 = ScaleRowDown34_C;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_C;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_C;
+ }
+#if defined(HAS_SCALEROWDOWN34_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_Any_NEON;
+ ScaleRowDown34_1 = ScaleRowDown34_Any_NEON;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_Any_NEON;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_Any_NEON;
+ }
+ if (dst_width % 24 == 0) {
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_NEON;
+ ScaleRowDown34_1 = ScaleRowDown34_NEON;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_NEON;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_NEON;
+ }
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN34_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_Any_SSSE3;
+ ScaleRowDown34_1 = ScaleRowDown34_Any_SSSE3;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_Any_SSSE3;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_Any_SSSE3;
+ }
+ if (dst_width % 24 == 0) {
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_SSSE3;
+ ScaleRowDown34_1 = ScaleRowDown34_SSSE3;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_SSSE3;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_SSSE3;
+ }
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN34_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 24 == 0) &&
+ IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_MIPS_DSPR2;
+ ScaleRowDown34_1 = ScaleRowDown34_MIPS_DSPR2;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_MIPS_DSPR2;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_MIPS_DSPR2;
+ }
+ }
+#endif
+
+ for (y = 0; y < dst_height - 2; y += 3) {
+ ScaleRowDown34_0(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ScaleRowDown34_1(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ScaleRowDown34_0(src_ptr + src_stride, -filter_stride,
+ dst_ptr, dst_width);
+ src_ptr += src_stride * 2;
+ dst_ptr += dst_stride;
+ }
+
+ // Remainder 1 or 2 rows with last row vertically unfiltered
+ if ((dst_height % 3) == 2) {
+ ScaleRowDown34_0(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ScaleRowDown34_1(src_ptr, 0, dst_ptr, dst_width);
+ } else if ((dst_height % 3) == 1) {
+ ScaleRowDown34_0(src_ptr, 0, dst_ptr, dst_width);
+ }
+}
+
+static void ScalePlaneDown34_16(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_ptr, uint16* dst_ptr,
+ enum FilterMode filtering) {
+ int y;
+ void (*ScaleRowDown34_0)(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width);
+ void (*ScaleRowDown34_1)(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width);
+ const int filter_stride = (filtering == kFilterLinear) ? 0 : src_stride;
+ assert(dst_width % 3 == 0);
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_16_C;
+ ScaleRowDown34_1 = ScaleRowDown34_16_C;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_16_C;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_16_C;
+ }
+#if defined(HAS_SCALEROWDOWN34_16_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && (dst_width % 24 == 0)) {
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_16_NEON;
+ ScaleRowDown34_1 = ScaleRowDown34_16_NEON;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_16_NEON;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_16_NEON;
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN34_16_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && (dst_width % 24 == 0)) {
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_16_SSSE3;
+ ScaleRowDown34_1 = ScaleRowDown34_16_SSSE3;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_16_SSSE3;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_16_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN34_16_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 24 == 0) &&
+ IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
+ if (!filtering) {
+ ScaleRowDown34_0 = ScaleRowDown34_16_MIPS_DSPR2;
+ ScaleRowDown34_1 = ScaleRowDown34_16_MIPS_DSPR2;
+ } else {
+ ScaleRowDown34_0 = ScaleRowDown34_0_Box_16_MIPS_DSPR2;
+ ScaleRowDown34_1 = ScaleRowDown34_1_Box_16_MIPS_DSPR2;
+ }
+ }
+#endif
+
+ for (y = 0; y < dst_height - 2; y += 3) {
+ ScaleRowDown34_0(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ScaleRowDown34_1(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ScaleRowDown34_0(src_ptr + src_stride, -filter_stride,
+ dst_ptr, dst_width);
+ src_ptr += src_stride * 2;
+ dst_ptr += dst_stride;
+ }
+
+ // Remainder 1 or 2 rows with last row vertically unfiltered
+ if ((dst_height % 3) == 2) {
+ ScaleRowDown34_0(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride;
+ dst_ptr += dst_stride;
+ ScaleRowDown34_1(src_ptr, 0, dst_ptr, dst_width);
+ } else if ((dst_height % 3) == 1) {
+ ScaleRowDown34_0(src_ptr, 0, dst_ptr, dst_width);
+ }
+}
+
+
+// Scale plane, 3/8
+// This is an optimized version for scaling down a plane to 3/8
+// of its original size.
+//
+// Uses a box filter arranged like this:
+// aaabbbcc -> abc
+// aaabbbcc def
+// aaabbbcc ghi
+// dddeeeff
+// dddeeeff
+// dddeeeff
+// ggghhhii
+// ggghhhii
+// Boxes are 3x3, 2x3, 3x2 and 2x2
+
+static void ScalePlaneDown38(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_ptr, uint8* dst_ptr,
+ enum FilterMode filtering) {
+ int y;
+ void (*ScaleRowDown38_3)(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+ void (*ScaleRowDown38_2)(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width);
+ const int filter_stride = (filtering == kFilterLinear) ? 0 : src_stride;
+ assert(dst_width % 3 == 0);
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_C;
+ ScaleRowDown38_2 = ScaleRowDown38_C;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_C;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_C;
+ }
+
+#if defined(HAS_SCALEROWDOWN38_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_Any_NEON;
+ ScaleRowDown38_2 = ScaleRowDown38_Any_NEON;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_Any_NEON;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_Any_NEON;
+ }
+ if (dst_width % 12 == 0) {
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_NEON;
+ ScaleRowDown38_2 = ScaleRowDown38_NEON;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_NEON;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_NEON;
+ }
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN38_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_Any_SSSE3;
+ ScaleRowDown38_2 = ScaleRowDown38_Any_SSSE3;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_Any_SSSE3;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_Any_SSSE3;
+ }
+ if (dst_width % 12 == 0 && !filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_SSSE3;
+ ScaleRowDown38_2 = ScaleRowDown38_SSSE3;
+ }
+ if (dst_width % 6 == 0 && filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_SSSE3;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN38_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 12 == 0) &&
+ IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_MIPS_DSPR2;
+ ScaleRowDown38_2 = ScaleRowDown38_MIPS_DSPR2;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_MIPS_DSPR2;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_MIPS_DSPR2;
+ }
+ }
+#endif
+
+ for (y = 0; y < dst_height - 2; y += 3) {
+ ScaleRowDown38_3(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride * 3;
+ dst_ptr += dst_stride;
+ ScaleRowDown38_3(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride * 3;
+ dst_ptr += dst_stride;
+ ScaleRowDown38_2(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride * 2;
+ dst_ptr += dst_stride;
+ }
+
+ // Remainder 1 or 2 rows with last row vertically unfiltered
+ if ((dst_height % 3) == 2) {
+ ScaleRowDown38_3(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride * 3;
+ dst_ptr += dst_stride;
+ ScaleRowDown38_3(src_ptr, 0, dst_ptr, dst_width);
+ } else if ((dst_height % 3) == 1) {
+ ScaleRowDown38_3(src_ptr, 0, dst_ptr, dst_width);
+ }
+}
+
+static void ScalePlaneDown38_16(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_ptr, uint16* dst_ptr,
+ enum FilterMode filtering) {
+ int y;
+ void (*ScaleRowDown38_3)(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width);
+ void (*ScaleRowDown38_2)(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width);
+ const int filter_stride = (filtering == kFilterLinear) ? 0 : src_stride;
+ assert(dst_width % 3 == 0);
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_16_C;
+ ScaleRowDown38_2 = ScaleRowDown38_16_C;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_16_C;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_16_C;
+ }
+#if defined(HAS_SCALEROWDOWN38_16_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && (dst_width % 12 == 0)) {
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_16_NEON;
+ ScaleRowDown38_2 = ScaleRowDown38_16_NEON;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_16_NEON;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_16_NEON;
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN38_16_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && (dst_width % 24 == 0)) {
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_16_SSSE3;
+ ScaleRowDown38_2 = ScaleRowDown38_16_SSSE3;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_16_SSSE3;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_16_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_SCALEROWDOWN38_16_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && (dst_width % 12 == 0) &&
+ IS_ALIGNED(src_ptr, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst_ptr, 4) && IS_ALIGNED(dst_stride, 4)) {
+ if (!filtering) {
+ ScaleRowDown38_3 = ScaleRowDown38_16_MIPS_DSPR2;
+ ScaleRowDown38_2 = ScaleRowDown38_16_MIPS_DSPR2;
+ } else {
+ ScaleRowDown38_3 = ScaleRowDown38_3_Box_16_MIPS_DSPR2;
+ ScaleRowDown38_2 = ScaleRowDown38_2_Box_16_MIPS_DSPR2;
+ }
+ }
+#endif
+
+ for (y = 0; y < dst_height - 2; y += 3) {
+ ScaleRowDown38_3(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride * 3;
+ dst_ptr += dst_stride;
+ ScaleRowDown38_3(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride * 3;
+ dst_ptr += dst_stride;
+ ScaleRowDown38_2(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride * 2;
+ dst_ptr += dst_stride;
+ }
+
+ // Remainder 1 or 2 rows with last row vertically unfiltered
+ if ((dst_height % 3) == 2) {
+ ScaleRowDown38_3(src_ptr, filter_stride, dst_ptr, dst_width);
+ src_ptr += src_stride * 3;
+ dst_ptr += dst_stride;
+ ScaleRowDown38_3(src_ptr, 0, dst_ptr, dst_width);
+ } else if ((dst_height % 3) == 1) {
+ ScaleRowDown38_3(src_ptr, 0, dst_ptr, dst_width);
+ }
+}
+
+#define MIN1(x) ((x) < 1 ? 1 : (x))
+
+static __inline uint32 SumPixels(int iboxwidth, const uint16* src_ptr) {
+ uint32 sum = 0u;
+ int x;
+ assert(iboxwidth > 0);
+ for (x = 0; x < iboxwidth; ++x) {
+ sum += src_ptr[x];
+ }
+ return sum;
+}
+
+static __inline uint32 SumPixels_16(int iboxwidth, const uint32* src_ptr) {
+ uint32 sum = 0u;
+ int x;
+ assert(iboxwidth > 0);
+ for (x = 0; x < iboxwidth; ++x) {
+ sum += src_ptr[x];
+ }
+ return sum;
+}
+
+static void ScaleAddCols2_C(int dst_width, int boxheight, int x, int dx,
+ const uint16* src_ptr, uint8* dst_ptr) {
+ int i;
+ int scaletbl[2];
+ int minboxwidth = dx >> 16;
+ int* scaleptr = scaletbl - minboxwidth;
+ int boxwidth;
+ scaletbl[0] = 65536 / (MIN1(minboxwidth) * boxheight);
+ scaletbl[1] = 65536 / (MIN1(minboxwidth + 1) * boxheight);
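+ // boxwidth below is always minboxwidth or minboxwidth + 1, so
+ // scaleptr[boxwidth] selects scaletbl[0] or scaletbl[1].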
+ for (i = 0; i < dst_width; ++i) {
+ int ix = x >> 16;
+ x += dx;
+ boxwidth = MIN1((x >> 16) - ix);
+ *dst_ptr++ = SumPixels(boxwidth, src_ptr + ix) * scaleptr[boxwidth] >> 16;
+ }
+}
+
+static void ScaleAddCols2_16_C(int dst_width, int boxheight, int x, int dx,
+ const uint32* src_ptr, uint16* dst_ptr) {
+ int i;
+ int scaletbl[2];
+ int minboxwidth = dx >> 16;
+ int* scaleptr = scaletbl - minboxwidth;
+ int boxwidth;
+ scaletbl[0] = 65536 / (MIN1(minboxwidth) * boxheight);
+ scaletbl[1] = 65536 / (MIN1(minboxwidth + 1) * boxheight);
+ for (i = 0; i < dst_width; ++i) {
+ int ix = x >> 16;
+ x += dx;
+ boxwidth = MIN1((x >> 16) - ix);
+ *dst_ptr++ =
+ SumPixels_16(boxwidth, src_ptr + ix) * scaleptr[boxwidth] >> 16;
+ }
+}
+
+static void ScaleAddCols0_C(int dst_width, int boxheight, int x, int,
+ const uint16* src_ptr, uint8* dst_ptr) {
+ int scaleval = 65536 / boxheight;
+ int i;
+ src_ptr += (x >> 16);
+ for (i = 0; i < dst_width; ++i) {
+ *dst_ptr++ = src_ptr[i] * scaleval >> 16;
+ }
+}
+
+static void ScaleAddCols1_C(int dst_width, int boxheight, int x, int dx,
+ const uint16* src_ptr, uint8* dst_ptr) {
+ int boxwidth = MIN1(dx >> 16);
+ int scaleval = 65536 / (boxwidth * boxheight);
+ int i;
+ x >>= 16;
+ for (i = 0; i < dst_width; ++i) {
+ *dst_ptr++ = SumPixels(boxwidth, src_ptr + x) * scaleval >> 16;
+ x += boxwidth;
+ }
+}
+
+static void ScaleAddCols1_16_C(int dst_width, int boxheight, int x, int dx,
+ const uint32* src_ptr, uint16* dst_ptr) {
+ int boxwidth = MIN1(dx >> 16);
+ int scaleval = 65536 / (boxwidth * boxheight);
+ int i;
+ for (i = 0; i < dst_width; ++i) {
+ *dst_ptr++ = SumPixels_16(boxwidth, src_ptr + x) * scaleval >> 16;
+ x += boxwidth;
+ }
+}
+
+// Scale plane down to any dimensions, with interpolation (box filter).
+//
+// Same method as SimpleScale, using 16.16 fixed point to step through the
+// source, but each destination pixel is the simple average of a box of
+// source pixels.
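+//
+// Illustrative stepping sketch (the actual x/dx come from ScaleSlope): with
+// dx = (src_width << 16) / dst_width, output pixel i averages source columns
+// [x >> 16, (x + dx) >> 16); e.g. 640 -> 200 gives dx = 0x33333 (~3.2), so
+// box widths alternate between 3 and 4.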
+static void ScalePlaneBox(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_ptr, uint8* dst_ptr) {
+ int j, k;
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ const int max_y = (src_height << 16);
+ ScaleSlope(src_width, src_height, dst_width, dst_height, kFilterBox,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+ {
+ // Allocate a row buffer of uint16.
+ align_buffer_64(row16, src_width * 2);
+ void (*ScaleAddCols)(int dst_width, int boxheight, int x, int dx,
+ const uint16* src_ptr, uint8* dst_ptr) =
+ (dx & 0xffff) ? ScaleAddCols2_C:
+ ((dx != 0x10000) ? ScaleAddCols1_C : ScaleAddCols0_C);
+ void (*ScaleAddRow)(const uint8* src_ptr, uint16* dst_ptr, int src_width) =
+ ScaleAddRow_C;
+#if defined(HAS_SCALEADDROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ScaleAddRow = ScaleAddRow_Any_SSE2;
+ if (IS_ALIGNED(src_width, 16)) {
+ ScaleAddRow = ScaleAddRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SCALEADDROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ ScaleAddRow = ScaleAddRow_Any_AVX2;
+ if (IS_ALIGNED(src_width, 32)) {
+ ScaleAddRow = ScaleAddRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_SCALEADDROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ScaleAddRow = ScaleAddRow_Any_NEON;
+ if (IS_ALIGNED(src_width, 16)) {
+ ScaleAddRow = ScaleAddRow_NEON;
+ }
+ }
+#endif
+
+ for (j = 0; j < dst_height; ++j) {
+ int boxheight;
+ int iy = y >> 16;
+ const uint8* src = src_ptr + iy * src_stride;
+ y += dy;
+ if (y > max_y) {
+ y = max_y;
+ }
+ boxheight = MIN1((y >> 16) - iy);
+ memset(row16, 0, src_width * 2);
+ for (k = 0; k < boxheight; ++k) {
+ ScaleAddRow(src, (uint16 *)(row16), src_width);
+ src += src_stride;
+ }
+ ScaleAddCols(dst_width, boxheight, x, dx, (uint16*)(row16), dst_ptr);
+ dst_ptr += dst_stride;
+ }
+ free_aligned_buffer_64(row16);
+ }
+}
+
+static void ScalePlaneBox_16(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_ptr, uint16* dst_ptr) {
+ int j, k;
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ const int max_y = (src_height << 16);
+ ScaleSlope(src_width, src_height, dst_width, dst_height, kFilterBox,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+ {
+ // Allocate a row buffer of uint32.
+ align_buffer_64(row32, src_width * 4);
+ void (*ScaleAddCols)(int dst_width, int boxheight, int x, int dx,
+ const uint32* src_ptr, uint16* dst_ptr) =
+ (dx & 0xffff) ? ScaleAddCols2_16_C: ScaleAddCols1_16_C;
+ void (*ScaleAddRow)(const uint16* src_ptr, uint32* dst_ptr, int src_width) =
+ ScaleAddRow_16_C;
+
+#if defined(HAS_SCALEADDROW_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(src_width, 16)) {
+ ScaleAddRow = ScaleAddRow_16_SSE2;
+ }
+#endif
+
+ for (j = 0; j < dst_height; ++j) {
+ int boxheight;
+ int iy = y >> 16;
+ const uint16* src = src_ptr + iy * src_stride;
+ y += dy;
+ if (y > max_y) {
+ y = max_y;
+ }
+ boxheight = MIN1((y >> 16) - iy);
+ memset(row32, 0, src_width * 4);
+ for (k = 0; k < boxheight; ++k) {
+ ScaleAddRow(src, (uint32 *)(row32), src_width);
+ src += src_stride;
+ }
+ ScaleAddCols(dst_width, boxheight, x, dx, (uint32*)(row32), dst_ptr);
+ dst_ptr += dst_stride;
+ }
+ free_aligned_buffer_64(row32);
+ }
+}
+
+// Scale plane down with bilinear interpolation.
+void ScalePlaneBilinearDown(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_ptr, uint8* dst_ptr,
+ enum FilterMode filtering) {
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear.
+ // Allocate a row buffer.
+ align_buffer_64(row, src_width);
+
+ const int max_y = (src_height - 1) << 16;
+ int j;
+ void (*ScaleFilterCols)(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) =
+ (src_width >= 32768) ? ScaleFilterCols64_C : ScaleFilterCols_C;
+ void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_C;
+ ScaleSlope(src_width, src_height, dst_width, dst_height, filtering,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(src_width, 16)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(src_width, 16)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(src_width, 32)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(src_width, 16)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
+ InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
+ if (IS_ALIGNED(src_width, 4)) {
+ InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ }
+ }
+#endif
+
+#if defined(HAS_SCALEFILTERCOLS_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
+ ScaleFilterCols = ScaleFilterCols_SSSE3;
+ }
+#endif
+#if defined(HAS_SCALEFILTERCOLS_NEON)
+ if (TestCpuFlag(kCpuHasNEON) && src_width < 32768) {
+ ScaleFilterCols = ScaleFilterCols_Any_NEON;
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleFilterCols = ScaleFilterCols_NEON;
+ }
+ }
+#endif
+ if (y > max_y) {
+ y = max_y;
+ }
+
+ for (j = 0; j < dst_height; ++j) {
+ int yi = y >> 16;
+ const uint8* src = src_ptr + yi * src_stride;
+ if (filtering == kFilterLinear) {
+ ScaleFilterCols(dst_ptr, src, dst_width, x, dx);
+ } else {
+ int yf = (y >> 8) & 255;
+ InterpolateRow(row, src, src_stride, src_width, yf);
+ ScaleFilterCols(dst_ptr, row, dst_width, x, dx);
+ }
+ dst_ptr += dst_stride;
+ y += dy;
+ if (y > max_y) {
+ y = max_y;
+ }
+ }
+ free_aligned_buffer_64(row);
+}
+
+void ScalePlaneBilinearDown_16(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_ptr, uint16* dst_ptr,
+ enum FilterMode filtering) {
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear.
+ // Allocate a row buffer.
+ align_buffer_64(row, src_width * 2);
+
+ const int max_y = (src_height - 1) << 16;
+ int j;
+ void (*ScaleFilterCols)(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx) =
+ (src_width >= 32768) ? ScaleFilterCols64_16_C : ScaleFilterCols_16_C;
+ void (*InterpolateRow)(uint16* dst_ptr, const uint16* src_ptr,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_16_C;
+ ScaleSlope(src_width, src_height, dst_width, dst_height, filtering,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+
+#if defined(HAS_INTERPOLATEROW_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_16_SSE2;
+ if (IS_ALIGNED(src_width, 16)) {
+ InterpolateRow = InterpolateRow_16_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_16_SSSE3;
+ if (IS_ALIGNED(src_width, 16)) {
+ InterpolateRow = InterpolateRow_16_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_16_AVX2;
+ if (IS_ALIGNED(src_width, 32)) {
+ InterpolateRow = InterpolateRow_16_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_16_NEON;
+ if (IS_ALIGNED(src_width, 16)) {
+ InterpolateRow = InterpolateRow_16_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
+ InterpolateRow = InterpolateRow_Any_16_MIPS_DSPR2;
+ if (IS_ALIGNED(src_width, 4)) {
+ InterpolateRow = InterpolateRow_16_MIPS_DSPR2;
+ }
+ }
+#endif
+
+#if defined(HAS_SCALEFILTERCOLS_16_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
+ ScaleFilterCols = ScaleFilterCols_16_SSSE3;
+ }
+#endif
+ if (y > max_y) {
+ y = max_y;
+ }
+
+ for (j = 0; j < dst_height; ++j) {
+ int yi = y >> 16;
+ const uint16* src = src_ptr + yi * src_stride;
+ if (filtering == kFilterLinear) {
+ ScaleFilterCols(dst_ptr, src, dst_width, x, dx);
+ } else {
+ int yf = (y >> 8) & 255;
+ InterpolateRow((uint16*)row, src, src_stride, src_width, yf);
+ ScaleFilterCols(dst_ptr, (uint16*)row, dst_width, x, dx);
+ }
+ dst_ptr += dst_stride;
+ y += dy;
+ if (y > max_y) {
+ y = max_y;
+ }
+ }
+ free_aligned_buffer_64(row);
+}
+
+// Scale plane up with bilinear interpolation.
+void ScalePlaneBilinearUp(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_ptr, uint8* dst_ptr,
+ enum FilterMode filtering) {
+ int j;
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ const int max_y = (src_height - 1) << 16;
+ void (*InterpolateRow)(uint8* dst_ptr, const uint8* src_ptr,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_C;
+ void (*ScaleFilterCols)(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) =
+ filtering ? ScaleFilterCols_C : ScaleCols_C;
+ ScaleSlope(src_width, src_height, dst_width, dst_height, filtering,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(dst_width, 16)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(dst_width, 16)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(dst_width, 32)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(dst_width, 16)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
+ InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
+ if (IS_ALIGNED(dst_width, 4)) {
+ InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ }
+ }
+#endif
+
+ if (filtering && src_width >= 32768) {
+ ScaleFilterCols = ScaleFilterCols64_C;
+ }
+#if defined(HAS_SCALEFILTERCOLS_SSSE3)
+ if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
+ ScaleFilterCols = ScaleFilterCols_SSSE3;
+ }
+#endif
+#if defined(HAS_SCALEFILTERCOLS_NEON)
+ if (filtering && TestCpuFlag(kCpuHasNEON) && src_width < 32768) {
+ ScaleFilterCols = ScaleFilterCols_Any_NEON;
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleFilterCols = ScaleFilterCols_NEON;
+ }
+ }
+#endif
+ if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
+ ScaleFilterCols = ScaleColsUp2_C;
+#if defined(HAS_SCALECOLS_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
+ ScaleFilterCols = ScaleColsUp2_SSE2;
+ }
+#endif
+ }
+
+ if (y > max_y) {
+ y = max_y;
+ }
+ {
+ int yi = y >> 16;
+ const uint8* src = src_ptr + yi * src_stride;
+
+ // Allocate 2 row buffers.
+ const int kRowSize = (dst_width + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
+
+ uint8* rowptr = row;
+ int rowstride = kRowSize;
+ int lasty = yi;
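+ // The two row buffers ping-pong: after scaling a new row, rowptr advances
+ // by rowstride and rowstride flips sign, so the other buffer is reused.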
+
+ ScaleFilterCols(rowptr, src, dst_width, x, dx);
+ if (src_height > 1) {
+ src += src_stride;
+ }
+ ScaleFilterCols(rowptr + rowstride, src, dst_width, x, dx);
+ src += src_stride;
+
+ for (j = 0; j < dst_height; ++j) {
+ yi = y >> 16;
+ if (yi != lasty) {
+ if (y > max_y) {
+ y = max_y;
+ yi = y >> 16;
+ src = src_ptr + yi * src_stride;
+ }
+ if (yi != lasty) {
+ ScaleFilterCols(rowptr, src, dst_width, x, dx);
+ rowptr += rowstride;
+ rowstride = -rowstride;
+ lasty = yi;
+ src += src_stride;
+ }
+ }
+ if (filtering == kFilterLinear) {
+ InterpolateRow(dst_ptr, rowptr, 0, dst_width, 0);
+ } else {
+ int yf = (y >> 8) & 255;
+ InterpolateRow(dst_ptr, rowptr, rowstride, dst_width, yf);
+ }
+ dst_ptr += dst_stride;
+ y += dy;
+ }
+ free_aligned_buffer_64(row);
+ }
+}
+
+void ScalePlaneBilinearUp_16(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_ptr, uint16* dst_ptr,
+ enum FilterMode filtering) {
+ int j;
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ const int max_y = (src_height - 1) << 16;
+ void (*InterpolateRow)(uint16* dst_ptr, const uint16* src_ptr,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_16_C;
+ void (*ScaleFilterCols)(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx) =
+ filtering ? ScaleFilterCols_16_C : ScaleCols_16_C;
+ ScaleSlope(src_width, src_height, dst_width, dst_height, filtering,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+
+#if defined(HAS_INTERPOLATEROW_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_16_SSE2;
+ if (IS_ALIGNED(dst_width, 16)) {
+ InterpolateRow = InterpolateRow_16_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_16_SSSE3;
+ if (IS_ALIGNED(dst_width, 16)) {
+ InterpolateRow = InterpolateRow_16_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_16_AVX2;
+ if (IS_ALIGNED(dst_width, 32)) {
+ InterpolateRow = InterpolateRow_16_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_16_NEON;
+ if (IS_ALIGNED(dst_width, 16)) {
+ InterpolateRow = InterpolateRow_16_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2)) {
+ InterpolateRow = InterpolateRow_Any_16_MIPS_DSPR2;
+ if (IS_ALIGNED(dst_width, 4)) {
+ InterpolateRow = InterpolateRow_16_MIPS_DSPR2;
+ }
+ }
+#endif
+
+ if (filtering && src_width >= 32768) {
+ ScaleFilterCols = ScaleFilterCols64_16_C;
+ }
+#if defined(HAS_SCALEFILTERCOLS_16_SSSE3)
+ if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
+ ScaleFilterCols = ScaleFilterCols_16_SSSE3;
+ }
+#endif
+ if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
+ ScaleFilterCols = ScaleColsUp2_16_C;
+#if defined(HAS_SCALECOLS_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
+ ScaleFilterCols = ScaleColsUp2_16_SSE2;
+ }
+#endif
+ }
+
+ if (y > max_y) {
+ y = max_y;
+ }
+ {
+ int yi = y >> 16;
+ const uint16* src = src_ptr + yi * src_stride;
+
+ // Allocate 2 row buffers.
+ const int kRowSize = (dst_width + 31) & ~31;
+ align_buffer_64(row, kRowSize * 4);
+
+ uint16* rowptr = (uint16*)row;
+ int rowstride = kRowSize;
+ int lasty = yi;
+
+ ScaleFilterCols(rowptr, src, dst_width, x, dx);
+ if (src_height > 1) {
+ src += src_stride;
+ }
+ ScaleFilterCols(rowptr + rowstride, src, dst_width, x, dx);
+ src += src_stride;
+
+ for (j = 0; j < dst_height; ++j) {
+ yi = y >> 16;
+ if (yi != lasty) {
+ if (y > max_y) {
+ y = max_y;
+ yi = y >> 16;
+ src = src_ptr + yi * src_stride;
+ }
+ if (yi != lasty) {
+ ScaleFilterCols(rowptr, src, dst_width, x, dx);
+ rowptr += rowstride;
+ rowstride = -rowstride;
+ lasty = yi;
+ src += src_stride;
+ }
+ }
+ if (filtering == kFilterLinear) {
+ InterpolateRow(dst_ptr, rowptr, 0, dst_width, 0);
+ } else {
+ int yf = (y >> 8) & 255;
+ InterpolateRow(dst_ptr, rowptr, rowstride, dst_width, yf);
+ }
+ dst_ptr += dst_stride;
+ y += dy;
+ }
+ free_aligned_buffer_64(row);
+ }
+}
+
+// Scale plane to/from any dimensions, without interpolation.
+// Fixed point math is used for performance: the upper 16 bits
+// of x and dx are the integer part of the source position and
+// the lower 16 bits are the fractional part.
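+// e.g. 640 -> 320 gives dx = 0x20000 (2.0 in 16.16), so every other source
+// pixel is sampled and the fractional bits are simply truncated.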
+
+static void ScalePlaneSimple(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_ptr, uint8* dst_ptr) {
+ int i;
+ void (*ScaleCols)(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) = ScaleCols_C;
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ ScaleSlope(src_width, src_height, dst_width, dst_height, kFilterNone,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+
+ if (src_width * 2 == dst_width && x < 0x8000) {
+ ScaleCols = ScaleColsUp2_C;
+#if defined(HAS_SCALECOLS_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
+ ScaleCols = ScaleColsUp2_SSE2;
+ }
+#endif
+ }
+
+ for (i = 0; i < dst_height; ++i) {
+ ScaleCols(dst_ptr, src_ptr + (y >> 16) * src_stride, dst_width, x, dx);
+ dst_ptr += dst_stride;
+ y += dy;
+ }
+}
+
+static void ScalePlaneSimple_16(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_ptr, uint16* dst_ptr) {
+ int i;
+ void (*ScaleCols)(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx) = ScaleCols_16_C;
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ ScaleSlope(src_width, src_height, dst_width, dst_height, kFilterNone,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+
+ if (src_width * 2 == dst_width && x < 0x8000) {
+ ScaleCols = ScaleColsUp2_16_C;
+#if defined(HAS_SCALECOLS_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
+ ScaleCols = ScaleColsUp2_16_SSE2;
+ }
+#endif
+ }
+
+ for (i = 0; i < dst_height; ++i) {
+ ScaleCols(dst_ptr, src_ptr + (y >> 16) * src_stride,
+ dst_width, x, dx);
+ dst_ptr += dst_stride;
+ y += dy;
+ }
+}
+
+// Scale a plane.
+// This function dispatches to a specialized scaler based on scale factor.
+
+LIBYUV_API
+void ScalePlane(const uint8* src, int src_stride,
+ int src_width, int src_height,
+ uint8* dst, int dst_stride,
+ int dst_width, int dst_height,
+ enum FilterMode filtering) {
+ // Simplify filtering when possible.
+ filtering = ScaleFilterReduce(src_width, src_height,
+ dst_width, dst_height, filtering);
+
+ // Negative height means invert the image.
+ if (src_height < 0) {
+ src_height = -src_height;
+ src = src + (src_height - 1) * src_stride;
+ src_stride = -src_stride;
+ }
+
+ // Use specialized scalers to improve performance for common resolutions.
+ // For example, all the 1/2 scalings will use ScalePlaneDown2().
+ if (dst_width == src_width && dst_height == src_height) {
+ // Straight copy.
+ CopyPlane(src, src_stride, dst, dst_stride, dst_width, dst_height);
+ return;
+ }
+ if (dst_width == src_width && filtering != kFilterBox) {
+ int dy = FixedDiv(src_height, dst_height);
+ // Arbitrary scale vertically, but unscaled horizontally.
+ ScalePlaneVertical(src_height,
+ dst_width, dst_height,
+ src_stride, dst_stride, src, dst,
+ 0, 0, dy, 1, filtering);
+ return;
+ }
+ if (dst_width <= Abs(src_width) && dst_height <= src_height) {
+ // Scale down.
+ if (4 * dst_width == 3 * src_width &&
+ 4 * dst_height == 3 * src_height) {
+ // optimized, 3/4
+ ScalePlaneDown34(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ if (2 * dst_width == src_width && 2 * dst_height == src_height) {
+ // optimized, 1/2
+ ScalePlaneDown2(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ // 3/8 rounded up for odd sized chroma height.
+ if (8 * dst_width == 3 * src_width &&
+ dst_height == ((src_height * 3 + 7) / 8)) {
+ // optimized, 3/8
+ ScalePlaneDown38(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ if (4 * dst_width == src_width && 4 * dst_height == src_height &&
+ (filtering == kFilterBox || filtering == kFilterNone)) {
+ // optimized, 1/4
+ ScalePlaneDown4(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ }
+ if (filtering == kFilterBox && dst_height * 2 < src_height) {
+ ScalePlaneBox(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst);
+ return;
+ }
+ if (filtering && dst_height > src_height) {
+ ScalePlaneBilinearUp(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ if (filtering) {
+ ScalePlaneBilinearDown(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ ScalePlaneSimple(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst);
+}
+
+LIBYUV_API
+void ScalePlane_16(const uint16* src, int src_stride,
+ int src_width, int src_height,
+ uint16* dst, int dst_stride,
+ int dst_width, int dst_height,
+ enum FilterMode filtering) {
+ // Simplify filtering when possible.
+ filtering = ScaleFilterReduce(src_width, src_height,
+ dst_width, dst_height, filtering);
+
+ // Negative height means invert the image.
+ if (src_height < 0) {
+ src_height = -src_height;
+ src = src + (src_height - 1) * src_stride;
+ src_stride = -src_stride;
+ }
+
+ // Use specialized scalers to improve performance for common resolutions.
+ // For example, all the 1/2 scalings will use ScalePlaneDown2().
+ if (dst_width == src_width && dst_height == src_height) {
+ // Straight copy.
+ CopyPlane_16(src, src_stride, dst, dst_stride, dst_width, dst_height);
+ return;
+ }
+ if (dst_width == src_width) {
+ int dy = FixedDiv(src_height, dst_height);
+ // Arbitrary scale vertically, but unscaled horizontally.
+ ScalePlaneVertical_16(src_height,
+ dst_width, dst_height,
+ src_stride, dst_stride, src, dst,
+ 0, 0, dy, 1, filtering);
+ return;
+ }
+ if (dst_width <= Abs(src_width) && dst_height <= src_height) {
+ // Scale down.
+ if (4 * dst_width == 3 * src_width &&
+ 4 * dst_height == 3 * src_height) {
+ // optimized, 3/4
+ ScalePlaneDown34_16(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ if (2 * dst_width == src_width && 2 * dst_height == src_height) {
+ // optimized, 1/2
+ ScalePlaneDown2_16(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+    // 3/8 rounded up for odd-sized chroma height.
+ if (8 * dst_width == 3 * src_width &&
+ dst_height == ((src_height * 3 + 7) / 8)) {
+ // optimized, 3/8
+ ScalePlaneDown38_16(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ if (4 * dst_width == src_width && 4 * dst_height == src_height &&
+ filtering != kFilterBilinear) {
+ // optimized, 1/4
+ ScalePlaneDown4_16(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ }
+ if (filtering == kFilterBox && dst_height * 2 < src_height) {
+ ScalePlaneBox_16(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst);
+ return;
+ }
+ if (filtering && dst_height > src_height) {
+ ScalePlaneBilinearUp_16(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ if (filtering) {
+ ScalePlaneBilinearDown_16(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst, filtering);
+ return;
+ }
+ ScalePlaneSimple_16(src_width, src_height, dst_width, dst_height,
+ src_stride, dst_stride, src, dst);
+}
+
+// Scale an I420 image.
+// This function in turn calls a scaling function for each plane.
+
+LIBYUV_API
+int I420Scale(const uint8* src_y, int src_stride_y,
+ const uint8* src_u, int src_stride_u,
+ const uint8* src_v, int src_stride_v,
+ int src_width, int src_height,
+ uint8* dst_y, int dst_stride_y,
+ uint8* dst_u, int dst_stride_u,
+ uint8* dst_v, int dst_stride_v,
+ int dst_width, int dst_height,
+ enum FilterMode filtering) {
+ int src_halfwidth = SUBSAMPLE(src_width, 1, 1);
+ int src_halfheight = SUBSAMPLE(src_height, 1, 1);
+ int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1);
+ int dst_halfheight = SUBSAMPLE(dst_height, 1, 1);
+ if (!src_y || !src_u || !src_v || src_width == 0 || src_height == 0 ||
+ src_width > 32768 || src_height > 32768 ||
+ !dst_y || !dst_u || !dst_v || dst_width <= 0 || dst_height <= 0) {
+ return -1;
+ }
+
+ ScalePlane(src_y, src_stride_y, src_width, src_height,
+ dst_y, dst_stride_y, dst_width, dst_height,
+ filtering);
+ ScalePlane(src_u, src_stride_u, src_halfwidth, src_halfheight,
+ dst_u, dst_stride_u, dst_halfwidth, dst_halfheight,
+ filtering);
+ ScalePlane(src_v, src_stride_v, src_halfwidth, src_halfheight,
+ dst_v, dst_stride_v, dst_halfwidth, dst_halfheight,
+ filtering);
+ return 0;
+}
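+
+// Illustrative usage (hypothetical buffers): scaling 1920x1080 I420 to
+// 1280x720 scales the Y plane 1920x1080 -> 1280x720 and, via SUBSAMPLE,
+// each chroma plane 960x540 -> 640x360:
+//   I420Scale(src_y, 1920, src_u, 960, src_v, 960, 1920, 1080,
+//             dst_y, 1280, dst_u, 640, dst_v, 640, 1280, 720,
+//             kFilterBilinear);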
+
+LIBYUV_API
+int I420Scale_16(const uint16* src_y, int src_stride_y,
+ const uint16* src_u, int src_stride_u,
+ const uint16* src_v, int src_stride_v,
+ int src_width, int src_height,
+ uint16* dst_y, int dst_stride_y,
+ uint16* dst_u, int dst_stride_u,
+ uint16* dst_v, int dst_stride_v,
+ int dst_width, int dst_height,
+ enum FilterMode filtering) {
+ int src_halfwidth = SUBSAMPLE(src_width, 1, 1);
+ int src_halfheight = SUBSAMPLE(src_height, 1, 1);
+ int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1);
+ int dst_halfheight = SUBSAMPLE(dst_height, 1, 1);
+ if (!src_y || !src_u || !src_v || src_width == 0 || src_height == 0 ||
+ src_width > 32768 || src_height > 32768 ||
+ !dst_y || !dst_u || !dst_v || dst_width <= 0 || dst_height <= 0) {
+ return -1;
+ }
+
+ ScalePlane_16(src_y, src_stride_y, src_width, src_height,
+ dst_y, dst_stride_y, dst_width, dst_height,
+ filtering);
+ ScalePlane_16(src_u, src_stride_u, src_halfwidth, src_halfheight,
+ dst_u, dst_stride_u, dst_halfwidth, dst_halfheight,
+ filtering);
+ ScalePlane_16(src_v, src_stride_v, src_halfwidth, src_halfheight,
+ dst_v, dst_stride_v, dst_halfwidth, dst_halfheight,
+ filtering);
+ return 0;
+}
+
+// Deprecated API.
+LIBYUV_API
+int Scale(const uint8* src_y, const uint8* src_u, const uint8* src_v,
+ int src_stride_y, int src_stride_u, int src_stride_v,
+ int src_width, int src_height,
+ uint8* dst_y, uint8* dst_u, uint8* dst_v,
+ int dst_stride_y, int dst_stride_u, int dst_stride_v,
+ int dst_width, int dst_height,
+ LIBYUV_BOOL interpolate) {
+ return I420Scale(src_y, src_stride_y,
+ src_u, src_stride_u,
+ src_v, src_stride_v,
+ src_width, src_height,
+ dst_y, dst_stride_y,
+ dst_u, dst_stride_u,
+ dst_v, dst_stride_v,
+ dst_width, dst_height,
+ interpolate ? kFilterBox : kFilterNone);
+}
+
+// Deprecated API.
+LIBYUV_API
+int ScaleOffset(const uint8* src, int src_width, int src_height,
+ uint8* dst, int dst_width, int dst_height, int dst_yoffset,
+ LIBYUV_BOOL interpolate) {
+  // Chroma requires the offset to be a multiple of 2.
+ int dst_yoffset_even = dst_yoffset & ~1;
+ int src_halfwidth = SUBSAMPLE(src_width, 1, 1);
+ int src_halfheight = SUBSAMPLE(src_height, 1, 1);
+ int dst_halfwidth = SUBSAMPLE(dst_width, 1, 1);
+ int dst_halfheight = SUBSAMPLE(dst_height, 1, 1);
+ int aheight = dst_height - dst_yoffset_even * 2; // actual output height
+ const uint8* src_y = src;
+ const uint8* src_u = src + src_width * src_height;
+ const uint8* src_v = src + src_width * src_height +
+ src_halfwidth * src_halfheight;
+ uint8* dst_y = dst + dst_yoffset_even * dst_width;
+ uint8* dst_u = dst + dst_width * dst_height +
+ (dst_yoffset_even >> 1) * dst_halfwidth;
+ uint8* dst_v = dst + dst_width * dst_height + dst_halfwidth * dst_halfheight +
+ (dst_yoffset_even >> 1) * dst_halfwidth;
+ if (!src || src_width <= 0 || src_height <= 0 ||
+ !dst || dst_width <= 0 || dst_height <= 0 || dst_yoffset_even < 0 ||
+ dst_yoffset_even >= dst_height) {
+ return -1;
+ }
+ return I420Scale(src_y, src_width,
+ src_u, src_halfwidth,
+ src_v, src_halfwidth,
+ src_width, src_height,
+ dst_y, dst_width,
+ dst_u, dst_halfwidth,
+ dst_v, dst_halfwidth,
+ dst_width, aheight,
+ interpolate ? kFilterBox : kFilterNone);
+}
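+
+// Worked layout example (illustrative): for a contiguous 640x480 I420
+// source, src_u = src + 640 * 480 and src_v = src + 640 * 480 + 320 * 240.
+// A dst_yoffset of 5 rounds down to 4 so the chroma offset
+// (dst_yoffset_even >> 1) stays aligned to whole chroma rows.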
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/scale_any.cc b/media/libaom/src/third_party/libyuv/source/scale_any.cc
new file mode 100644
index 000000000..2f6a2c8ba
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale_any.cc
@@ -0,0 +1,200 @@
+/*
+ * Copyright 2015 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/scale.h"
+#include "libyuv/scale_row.h"
+
+#include "libyuv/basic_types.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// Definition for ScaleFilterCols, ScaleARGBCols and ScaleARGBFilterCols
+#define CANY(NAMEANY, TERP_SIMD, TERP_C, BPP, MASK) \
+ void NAMEANY(uint8* dst_ptr, const uint8* src_ptr, \
+ int dst_width, int x, int dx) { \
+ int n = dst_width & ~MASK; \
+ if (n > 0) { \
+ TERP_SIMD(dst_ptr, src_ptr, n, x, dx); \
+ } \
+ TERP_C(dst_ptr + n * BPP, src_ptr, \
+ dst_width & MASK, x + n * dx, dx); \
+ }
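+
+// Worked example of the CANY remainder split (illustrative): with MASK 7
+// and dst_width 100, n = 100 & ~7 = 96 columns take the SIMD path and the
+// remaining 100 & 7 = 4 columns take the C path, with x advanced by
+// 96 * dx.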
+
+#ifdef HAS_SCALEFILTERCOLS_NEON
+CANY(ScaleFilterCols_Any_NEON, ScaleFilterCols_NEON, ScaleFilterCols_C, 1, 7)
+#endif
+#ifdef HAS_SCALEARGBCOLS_NEON
+CANY(ScaleARGBCols_Any_NEON, ScaleARGBCols_NEON, ScaleARGBCols_C, 4, 7)
+#endif
+#ifdef HAS_SCALEARGBFILTERCOLS_NEON
+CANY(ScaleARGBFilterCols_Any_NEON, ScaleARGBFilterCols_NEON,
+ ScaleARGBFilterCols_C, 4, 3)
+#endif
+#undef CANY
+
+// Fixed scale down.
+#define SDANY(NAMEANY, SCALEROWDOWN_SIMD, SCALEROWDOWN_C, FACTOR, BPP, MASK) \
+ void NAMEANY(const uint8* src_ptr, ptrdiff_t src_stride, \
+ uint8* dst_ptr, int dst_width) { \
+ int r = (int)((unsigned int)dst_width % (MASK + 1)); \
+ int n = dst_width - r; \
+ if (n > 0) { \
+ SCALEROWDOWN_SIMD(src_ptr, src_stride, dst_ptr, n); \
+ } \
+ SCALEROWDOWN_C(src_ptr + (n * FACTOR) * BPP, src_stride, \
+ dst_ptr + n * BPP, r); \
+ }
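+
+// Worked example of the SDANY split (illustrative): ScaleRowDown2_Any_SSE2
+// has MASK 15, so dst_width 50 gives r = 2 and n = 48; SSE2 produces 48
+// pixels and the C fallback the last 2, reading from src_ptr + 48 * 2 * 1.
+// SDAANY below follows the same pattern with an extra src_stepx argument.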
+
+#ifdef HAS_SCALEROWDOWN2_SSE2
+SDANY(ScaleRowDown2_Any_SSE2, ScaleRowDown2_SSE2, ScaleRowDown2_C, 2, 1, 15)
+SDANY(ScaleRowDown2Linear_Any_SSE2, ScaleRowDown2Linear_SSE2,
+ ScaleRowDown2Linear_C, 2, 1, 15)
+SDANY(ScaleRowDown2Box_Any_SSE2, ScaleRowDown2Box_SSE2, ScaleRowDown2Box_C,
+ 2, 1, 15)
+#endif
+#ifdef HAS_SCALEROWDOWN2_AVX2
+SDANY(ScaleRowDown2_Any_AVX2, ScaleRowDown2_AVX2, ScaleRowDown2_C, 2, 1, 31)
+SDANY(ScaleRowDown2Linear_Any_AVX2, ScaleRowDown2Linear_AVX2,
+ ScaleRowDown2Linear_C, 2, 1, 31)
+SDANY(ScaleRowDown2Box_Any_AVX2, ScaleRowDown2Box_AVX2, ScaleRowDown2Box_C,
+ 2, 1, 31)
+#endif
+#ifdef HAS_SCALEROWDOWN2_NEON
+SDANY(ScaleRowDown2_Any_NEON, ScaleRowDown2_NEON, ScaleRowDown2_C, 2, 1, 15)
+SDANY(ScaleRowDown2Linear_Any_NEON, ScaleRowDown2Linear_NEON,
+ ScaleRowDown2Linear_C, 2, 1, 15)
+SDANY(ScaleRowDown2Box_Any_NEON, ScaleRowDown2Box_NEON,
+ ScaleRowDown2Box_C, 2, 1, 15)
+#endif
+#ifdef HAS_SCALEROWDOWN4_SSE2
+SDANY(ScaleRowDown4_Any_SSE2, ScaleRowDown4_SSE2, ScaleRowDown4_C, 4, 1, 7)
+SDANY(ScaleRowDown4Box_Any_SSE2, ScaleRowDown4Box_SSE2, ScaleRowDown4Box_C,
+ 4, 1, 7)
+#endif
+#ifdef HAS_SCALEROWDOWN4_AVX2
+SDANY(ScaleRowDown4_Any_AVX2, ScaleRowDown4_AVX2, ScaleRowDown4_C, 4, 1, 15)
+SDANY(ScaleRowDown4Box_Any_AVX2, ScaleRowDown4Box_AVX2, ScaleRowDown4Box_C,
+ 4, 1, 15)
+#endif
+#ifdef HAS_SCALEROWDOWN4_NEON
+SDANY(ScaleRowDown4_Any_NEON, ScaleRowDown4_NEON, ScaleRowDown4_C, 4, 1, 7)
+SDANY(ScaleRowDown4Box_Any_NEON, ScaleRowDown4Box_NEON, ScaleRowDown4Box_C,
+ 4, 1, 7)
+#endif
+#ifdef HAS_SCALEROWDOWN34_SSSE3
+SDANY(ScaleRowDown34_Any_SSSE3, ScaleRowDown34_SSSE3,
+ ScaleRowDown34_C, 4 / 3, 1, 23)
+SDANY(ScaleRowDown34_0_Box_Any_SSSE3, ScaleRowDown34_0_Box_SSSE3,
+ ScaleRowDown34_0_Box_C, 4 / 3, 1, 23)
+SDANY(ScaleRowDown34_1_Box_Any_SSSE3, ScaleRowDown34_1_Box_SSSE3,
+ ScaleRowDown34_1_Box_C, 4 / 3, 1, 23)
+#endif
+#ifdef HAS_SCALEROWDOWN34_NEON
+SDANY(ScaleRowDown34_Any_NEON, ScaleRowDown34_NEON,
+ ScaleRowDown34_C, 4 / 3, 1, 23)
+SDANY(ScaleRowDown34_0_Box_Any_NEON, ScaleRowDown34_0_Box_NEON,
+ ScaleRowDown34_0_Box_C, 4 / 3, 1, 23)
+SDANY(ScaleRowDown34_1_Box_Any_NEON, ScaleRowDown34_1_Box_NEON,
+ ScaleRowDown34_1_Box_C, 4 / 3, 1, 23)
+#endif
+#ifdef HAS_SCALEROWDOWN38_SSSE3
+SDANY(ScaleRowDown38_Any_SSSE3, ScaleRowDown38_SSSE3,
+ ScaleRowDown38_C, 8 / 3, 1, 11)
+SDANY(ScaleRowDown38_3_Box_Any_SSSE3, ScaleRowDown38_3_Box_SSSE3,
+ ScaleRowDown38_3_Box_C, 8 / 3, 1, 5)
+SDANY(ScaleRowDown38_2_Box_Any_SSSE3, ScaleRowDown38_2_Box_SSSE3,
+ ScaleRowDown38_2_Box_C, 8 / 3, 1, 5)
+#endif
+#ifdef HAS_SCALEROWDOWN38_NEON
+SDANY(ScaleRowDown38_Any_NEON, ScaleRowDown38_NEON,
+ ScaleRowDown38_C, 8 / 3, 1, 11)
+SDANY(ScaleRowDown38_3_Box_Any_NEON, ScaleRowDown38_3_Box_NEON,
+ ScaleRowDown38_3_Box_C, 8 / 3, 1, 11)
+SDANY(ScaleRowDown38_2_Box_Any_NEON, ScaleRowDown38_2_Box_NEON,
+ ScaleRowDown38_2_Box_C, 8 / 3, 1, 11)
+#endif
+
+#ifdef HAS_SCALEARGBROWDOWN2_SSE2
+SDANY(ScaleARGBRowDown2_Any_SSE2, ScaleARGBRowDown2_SSE2,
+ ScaleARGBRowDown2_C, 2, 4, 3)
+SDANY(ScaleARGBRowDown2Linear_Any_SSE2, ScaleARGBRowDown2Linear_SSE2,
+ ScaleARGBRowDown2Linear_C, 2, 4, 3)
+SDANY(ScaleARGBRowDown2Box_Any_SSE2, ScaleARGBRowDown2Box_SSE2,
+ ScaleARGBRowDown2Box_C, 2, 4, 3)
+#endif
+#ifdef HAS_SCALEARGBROWDOWN2_NEON
+SDANY(ScaleARGBRowDown2_Any_NEON, ScaleARGBRowDown2_NEON,
+ ScaleARGBRowDown2_C, 2, 4, 7)
+SDANY(ScaleARGBRowDown2Linear_Any_NEON, ScaleARGBRowDown2Linear_NEON,
+ ScaleARGBRowDown2Linear_C, 2, 4, 7)
+SDANY(ScaleARGBRowDown2Box_Any_NEON, ScaleARGBRowDown2Box_NEON,
+ ScaleARGBRowDown2Box_C, 2, 4, 7)
+#endif
+#undef SDANY
+
+// Scale down by even scale factor.
+#define SDAANY(NAMEANY, SCALEROWDOWN_SIMD, SCALEROWDOWN_C, BPP, MASK) \
+ void NAMEANY(const uint8* src_ptr, ptrdiff_t src_stride, int src_stepx, \
+ uint8* dst_ptr, int dst_width) { \
+ int r = (int)((unsigned int)dst_width % (MASK + 1)); \
+ int n = dst_width - r; \
+ if (n > 0) { \
+ SCALEROWDOWN_SIMD(src_ptr, src_stride, src_stepx, dst_ptr, n); \
+ } \
+ SCALEROWDOWN_C(src_ptr + (n * src_stepx) * BPP, src_stride, \
+ src_stepx, dst_ptr + n * BPP, r); \
+ }
+
+#ifdef HAS_SCALEARGBROWDOWNEVEN_SSE2
+SDAANY(ScaleARGBRowDownEven_Any_SSE2, ScaleARGBRowDownEven_SSE2,
+ ScaleARGBRowDownEven_C, 4, 3)
+SDAANY(ScaleARGBRowDownEvenBox_Any_SSE2, ScaleARGBRowDownEvenBox_SSE2,
+ ScaleARGBRowDownEvenBox_C, 4, 3)
+#endif
+#ifdef HAS_SCALEARGBROWDOWNEVEN_NEON
+SDAANY(ScaleARGBRowDownEven_Any_NEON, ScaleARGBRowDownEven_NEON,
+ ScaleARGBRowDownEven_C, 4, 3)
+SDAANY(ScaleARGBRowDownEvenBox_Any_NEON, ScaleARGBRowDownEvenBox_NEON,
+ ScaleARGBRowDownEvenBox_C, 4, 3)
+#endif
+
+// Add rows box filter scale down.
+#define SAANY(NAMEANY, SCALEADDROW_SIMD, SCALEADDROW_C, MASK) \
+ void NAMEANY(const uint8* src_ptr, uint16* dst_ptr, int src_width) { \
+ int n = src_width & ~MASK; \
+ if (n > 0) { \
+ SCALEADDROW_SIMD(src_ptr, dst_ptr, n); \
+ } \
+ SCALEADDROW_C(src_ptr + n, dst_ptr + n, src_width & MASK); \
+ }
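+
+// Illustrative example: ScaleAddRow_Any_SSE2 (MASK 15) on src_width 100
+// accumulates 96 source pixels with SSE2 and the final 4 with C. Note the
+// split is on src_width, unlike SDANY, which splits dst_width.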
+
+#ifdef HAS_SCALEADDROW_SSE2
+SAANY(ScaleAddRow_Any_SSE2, ScaleAddRow_SSE2, ScaleAddRow_C, 15)
+#endif
+#ifdef HAS_SCALEADDROW_AVX2
+SAANY(ScaleAddRow_Any_AVX2, ScaleAddRow_AVX2, ScaleAddRow_C, 31)
+#endif
+#ifdef HAS_SCALEADDROW_NEON
+SAANY(ScaleAddRow_Any_NEON, ScaleAddRow_NEON, ScaleAddRow_C, 15)
+#endif
+#undef SAANY
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
+
+
+
+
diff --git a/media/libaom/src/third_party/libyuv/source/scale_argb.cc b/media/libaom/src/third_party/libyuv/source/scale_argb.cc
new file mode 100644
index 000000000..40a2d1ab2
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale_argb.cc
@@ -0,0 +1,853 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/scale.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "libyuv/cpu_id.h"
+#include "libyuv/planar_functions.h" // For CopyARGB
+#include "libyuv/row.h"
+#include "libyuv/scale_row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+static __inline int Abs(int v) {
+ return v >= 0 ? v : -v;
+}
+
+// ScaleARGB ARGB, 1/2
+// This is an optimized version for scaling down an ARGB image to 1/2 of
+// its original size.
+static void ScaleARGBDown2(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_argb, uint8* dst_argb,
+ int x, int dx, int y, int dy,
+ enum FilterMode filtering) {
+ int j;
+ int row_stride = src_stride * (dy >> 16);
+ void (*ScaleARGBRowDown2)(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) =
+ filtering == kFilterNone ? ScaleARGBRowDown2_C :
+ (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_C :
+ ScaleARGBRowDown2Box_C);
+ assert(dx == 65536 * 2); // Test scale factor of 2.
+  assert((dy & 0x1ffff) == 0);  // Test vertical scale is a multiple of 2.
+ // Advance to odd row, even column.
+ if (filtering == kFilterBilinear) {
+ src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
+ } else {
+ src_argb += (y >> 16) * src_stride + ((x >> 16) - 1) * 4;
+ }
+
+#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ScaleARGBRowDown2 = filtering == kFilterNone ? ScaleARGBRowDown2_Any_SSE2 :
+ (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_SSE2 :
+ ScaleARGBRowDown2Box_Any_SSE2);
+ if (IS_ALIGNED(dst_width, 4)) {
+ ScaleARGBRowDown2 = filtering == kFilterNone ? ScaleARGBRowDown2_SSE2 :
+ (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_SSE2 :
+ ScaleARGBRowDown2Box_SSE2);
+ }
+ }
+#endif
+#if defined(HAS_SCALEARGBROWDOWN2_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBRowDown2 = filtering == kFilterNone ? ScaleARGBRowDown2_Any_NEON :
+ (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_Any_NEON :
+ ScaleARGBRowDown2Box_Any_NEON);
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleARGBRowDown2 = filtering == kFilterNone ? ScaleARGBRowDown2_NEON :
+ (filtering == kFilterLinear ? ScaleARGBRowDown2Linear_NEON :
+ ScaleARGBRowDown2Box_NEON);
+ }
+ }
+#endif
+
+ if (filtering == kFilterLinear) {
+ src_stride = 0;
+ }
+ for (j = 0; j < dst_height; ++j) {
+ ScaleARGBRowDown2(src_argb, src_stride, dst_argb, dst_width);
+ src_argb += row_stride;
+ dst_argb += dst_stride;
+ }
+}
+
+// ScaleARGB ARGB, 1/4
+// This is an optimized version for scaling down an ARGB image to 1/4 of
+// its original size.
+static void ScaleARGBDown4Box(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_argb, uint8* dst_argb,
+ int x, int dx, int y, int dy) {
+ int j;
+ // Allocate 2 rows of ARGB.
+ const int kRowSize = (dst_width * 2 * 4 + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
+ int row_stride = src_stride * (dy >> 16);
+ void (*ScaleARGBRowDown2)(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) = ScaleARGBRowDown2Box_C;
+ // Advance to odd row, even column.
+ src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
+ assert(dx == 65536 * 4); // Test scale factor of 4.
+  assert((dy & 0x3ffff) == 0);  // Test vertical scale is a multiple of 4.
+#if defined(HAS_SCALEARGBROWDOWN2_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ScaleARGBRowDown2 = ScaleARGBRowDown2Box_Any_SSE2;
+ if (IS_ALIGNED(dst_width, 4)) {
+ ScaleARGBRowDown2 = ScaleARGBRowDown2Box_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SCALEARGBROWDOWN2_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBRowDown2 = ScaleARGBRowDown2Box_Any_NEON;
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleARGBRowDown2 = ScaleARGBRowDown2Box_NEON;
+ }
+ }
+#endif
+
+ for (j = 0; j < dst_height; ++j) {
+ ScaleARGBRowDown2(src_argb, src_stride, row, dst_width * 2);
+ ScaleARGBRowDown2(src_argb + src_stride * 2, src_stride,
+ row + kRowSize, dst_width * 2);
+ ScaleARGBRowDown2(row, kRowSize, dst_argb, dst_width);
+ src_argb += row_stride;
+ dst_argb += dst_stride;
+ }
+ free_aligned_buffer_64(row);
+}
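+
+// Design note: the 1/4 box filter above is implemented as a cascade of two
+// 1/2 box passes: each pair of source rows is first halved into the two
+// temporary rows at 2x the destination width, and those rows are then
+// halved again into the destination.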
+
+// ScaleARGB ARGB Even
+// This is an optimized version for scaling down an ARGB image by an even
+// factor of its original size.
+static void ScaleARGBDownEven(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_argb, uint8* dst_argb,
+ int x, int dx, int y, int dy,
+ enum FilterMode filtering) {
+ int j;
+ int col_step = dx >> 16;
+ int row_stride = (dy >> 16) * src_stride;
+ void (*ScaleARGBRowDownEven)(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_step, uint8* dst_argb, int dst_width) =
+ filtering ? ScaleARGBRowDownEvenBox_C : ScaleARGBRowDownEven_C;
+ assert(IS_ALIGNED(src_width, 2));
+ assert(IS_ALIGNED(src_height, 2));
+ src_argb += (y >> 16) * src_stride + (x >> 16) * 4;
+#if defined(HAS_SCALEARGBROWDOWNEVEN_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_SSE2 :
+ ScaleARGBRowDownEven_Any_SSE2;
+ if (IS_ALIGNED(dst_width, 4)) {
+ ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_SSE2 :
+ ScaleARGBRowDownEven_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_SCALEARGBROWDOWNEVEN_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_Any_NEON :
+ ScaleARGBRowDownEven_Any_NEON;
+ if (IS_ALIGNED(dst_width, 4)) {
+ ScaleARGBRowDownEven = filtering ? ScaleARGBRowDownEvenBox_NEON :
+ ScaleARGBRowDownEven_NEON;
+ }
+ }
+#endif
+
+ if (filtering == kFilterLinear) {
+ src_stride = 0;
+ }
+ for (j = 0; j < dst_height; ++j) {
+ ScaleARGBRowDownEven(src_argb, src_stride, col_step, dst_argb, dst_width);
+ src_argb += row_stride;
+ dst_argb += dst_stride;
+ }
+}
+
+// Scale ARGB down with bilinear interpolation.
+static void ScaleARGBBilinearDown(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_argb, uint8* dst_argb,
+ int x, int dx, int y, int dy,
+ enum FilterMode filtering) {
+ int j;
+ void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_C;
+ void (*ScaleARGBFilterCols)(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) =
+ (src_width >= 32768) ? ScaleARGBFilterCols64_C : ScaleARGBFilterCols_C;
+ int64 xlast = x + (int64)(dst_width - 1) * dx;
+ int64 xl = (dx >= 0) ? x : xlast;
+ int64 xr = (dx >= 0) ? xlast : x;
+ int clip_src_width;
+ xl = (xl >> 16) & ~3; // Left edge aligned.
+  xr = (xr >> 16) + 1;  // Rightmost pixel used. Bilinear uses 2 pixels.
+  xr = (xr + 1 + 3) & ~3;  // 1 beyond the 4-pixel-aligned rightmost pixel.
+ if (xr > src_width) {
+ xr = src_width;
+ }
+ clip_src_width = (int)(xr - xl) * 4; // Width aligned to 4.
+ src_argb += xl * 4;
+ x -= (int)(xl << 16);
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(clip_src_width, 16)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(clip_src_width, 16)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(clip_src_width, 32)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(clip_src_width, 16)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4)) {
+ InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
+ if (IS_ALIGNED(clip_src_width, 4)) {
+ InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ }
+ }
+#endif
+#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
+ }
+#endif
+#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
+ if (IS_ALIGNED(dst_width, 4)) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
+ }
+ }
+#endif
+ // TODO(fbarchard): Consider not allocating row buffer for kFilterLinear.
+ // Allocate a row of ARGB.
+ {
+ align_buffer_64(row, clip_src_width * 4);
+
+ const int max_y = (src_height - 1) << 16;
+ if (y > max_y) {
+ y = max_y;
+ }
+ for (j = 0; j < dst_height; ++j) {
+ int yi = y >> 16;
+ const uint8* src = src_argb + yi * src_stride;
+ if (filtering == kFilterLinear) {
+ ScaleARGBFilterCols(dst_argb, src, dst_width, x, dx);
+ } else {
+ int yf = (y >> 8) & 255;
+ InterpolateRow(row, src, src_stride, clip_src_width, yf);
+ ScaleARGBFilterCols(dst_argb, row, dst_width, x, dx);
+ }
+ dst_argb += dst_stride;
+ y += dy;
+ if (y > max_y) {
+ y = max_y;
+ }
+ }
+ free_aligned_buffer_64(row);
+ }
+}
+
+// Scale ARGB up with bilinear interpolation.
+static void ScaleARGBBilinearUp(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_argb, uint8* dst_argb,
+ int x, int dx, int y, int dy,
+ enum FilterMode filtering) {
+ int j;
+ void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_C;
+ void (*ScaleARGBFilterCols)(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) =
+ filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
+ const int max_y = (src_height - 1) << 16;
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(dst_width, 4)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(dst_width, 4)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(dst_width, 8)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(dst_width, 4)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) {
+ InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ }
+#endif
+ if (src_width >= 32768) {
+ ScaleARGBFilterCols = filtering ?
+ ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
+ }
+#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
+ if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
+ }
+#endif
+#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
+ if (filtering && TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
+ if (IS_ALIGNED(dst_width, 4)) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
+ }
+ }
+#endif
+#if defined(HAS_SCALEARGBCOLS_SSE2)
+ if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
+ ScaleARGBFilterCols = ScaleARGBCols_SSE2;
+ }
+#endif
+#if defined(HAS_SCALEARGBCOLS_NEON)
+ if (!filtering && TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBFilterCols = ScaleARGBCols_Any_NEON;
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleARGBFilterCols = ScaleARGBCols_NEON;
+ }
+ }
+#endif
+ if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
+ ScaleARGBFilterCols = ScaleARGBColsUp2_C;
+#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
+ ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
+ }
+#endif
+ }
+
+ if (y > max_y) {
+ y = max_y;
+ }
+
+ {
+ int yi = y >> 16;
+ const uint8* src = src_argb + yi * src_stride;
+
+ // Allocate 2 rows of ARGB.
+ const int kRowSize = (dst_width * 4 + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
+
+ uint8* rowptr = row;
+ int rowstride = kRowSize;
+ int lasty = yi;
+
+ ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
+ if (src_height > 1) {
+ src += src_stride;
+ }
+ ScaleARGBFilterCols(rowptr + rowstride, src, dst_width, x, dx);
+ src += src_stride;
+
+ for (j = 0; j < dst_height; ++j) {
+ yi = y >> 16;
+ if (yi != lasty) {
+ if (y > max_y) {
+ y = max_y;
+ yi = y >> 16;
+ src = src_argb + yi * src_stride;
+ }
+ if (yi != lasty) {
+ ScaleARGBFilterCols(rowptr, src, dst_width, x, dx);
+ rowptr += rowstride;
+ rowstride = -rowstride;
+ lasty = yi;
+ src += src_stride;
+ }
+ }
+ if (filtering == kFilterLinear) {
+ InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
+ } else {
+ int yf = (y >> 8) & 255;
+ InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
+ }
+ dst_argb += dst_stride;
+ y += dy;
+ }
+ free_aligned_buffer_64(row);
+ }
+}
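+
+// Design note: ScaleARGBBilinearUp keeps only two horizontally scaled rows
+// and ping-pongs between them by negating rowstride, so each new source row
+// overwrites the stale buffer while InterpolateRow blends the pair
+// vertically.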
+
+#ifdef YUVSCALEUP
+// Scale YUV to ARGB up with bilinear interpolation.
+static void ScaleYUVToARGBBilinearUp(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride_y,
+ int src_stride_u,
+ int src_stride_v,
+ int dst_stride_argb,
+ const uint8* src_y,
+ const uint8* src_u,
+ const uint8* src_v,
+ uint8* dst_argb,
+ int x, int dx, int y, int dy,
+ enum FilterMode filtering) {
+ int j;
+ void (*I422ToARGBRow)(const uint8* y_buf,
+ const uint8* u_buf,
+ const uint8* v_buf,
+ uint8* rgb_buf,
+ int width) = I422ToARGBRow_C;
+#if defined(HAS_I422TOARGBROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ I422ToARGBRow = I422ToARGBRow_Any_SSSE3;
+ if (IS_ALIGNED(src_width, 8)) {
+ I422ToARGBRow = I422ToARGBRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ I422ToARGBRow = I422ToARGBRow_Any_AVX2;
+ if (IS_ALIGNED(src_width, 16)) {
+ I422ToARGBRow = I422ToARGBRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ I422ToARGBRow = I422ToARGBRow_Any_NEON;
+ if (IS_ALIGNED(src_width, 8)) {
+ I422ToARGBRow = I422ToARGBRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_I422TOARGBROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) && IS_ALIGNED(src_width, 4) &&
+ IS_ALIGNED(src_y, 4) && IS_ALIGNED(src_stride_y, 4) &&
+ IS_ALIGNED(src_u, 2) && IS_ALIGNED(src_stride_u, 2) &&
+ IS_ALIGNED(src_v, 2) && IS_ALIGNED(src_stride_v, 2) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
+ I422ToARGBRow = I422ToARGBRow_MIPS_DSPR2;
+ }
+#endif
+
+ void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_C;
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(dst_width, 4)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(dst_width, 4)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(dst_width, 8)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(dst_width, 4)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride_argb, 4)) {
+ InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ }
+#endif
+
+ void (*ScaleARGBFilterCols)(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) =
+ filtering ? ScaleARGBFilterCols_C : ScaleARGBCols_C;
+ if (src_width >= 32768) {
+ ScaleARGBFilterCols = filtering ?
+ ScaleARGBFilterCols64_C : ScaleARGBCols64_C;
+ }
+#if defined(HAS_SCALEARGBFILTERCOLS_SSSE3)
+ if (filtering && TestCpuFlag(kCpuHasSSSE3) && src_width < 32768) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_SSSE3;
+ }
+#endif
+#if defined(HAS_SCALEARGBFILTERCOLS_NEON)
+ if (filtering && TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_Any_NEON;
+ if (IS_ALIGNED(dst_width, 4)) {
+ ScaleARGBFilterCols = ScaleARGBFilterCols_NEON;
+ }
+ }
+#endif
+#if defined(HAS_SCALEARGBCOLS_SSE2)
+ if (!filtering && TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
+ ScaleARGBFilterCols = ScaleARGBCols_SSE2;
+ }
+#endif
+#if defined(HAS_SCALEARGBCOLS_NEON)
+ if (!filtering && TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBFilterCols = ScaleARGBCols_Any_NEON;
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleARGBFilterCols = ScaleARGBCols_NEON;
+ }
+ }
+#endif
+ if (!filtering && src_width * 2 == dst_width && x < 0x8000) {
+ ScaleARGBFilterCols = ScaleARGBColsUp2_C;
+#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
+ ScaleARGBFilterCols = ScaleARGBColsUp2_SSE2;
+ }
+#endif
+ }
+
+ const int max_y = (src_height - 1) << 16;
+ if (y > max_y) {
+ y = max_y;
+ }
+ const int kYShift = 1; // Shift Y by 1 to convert Y plane to UV coordinate.
+ int yi = y >> 16;
+ int uv_yi = yi >> kYShift;
+ const uint8* src_row_y = src_y + yi * src_stride_y;
+ const uint8* src_row_u = src_u + uv_yi * src_stride_u;
+ const uint8* src_row_v = src_v + uv_yi * src_stride_v;
+
+ // Allocate 2 rows of ARGB.
+ const int kRowSize = (dst_width * 4 + 31) & ~31;
+ align_buffer_64(row, kRowSize * 2);
+
+ // Allocate 1 row of ARGB for source conversion.
+ align_buffer_64(argb_row, src_width * 4);
+
+ uint8* rowptr = row;
+ int rowstride = kRowSize;
+ int lasty = yi;
+
+ // TODO(fbarchard): Convert first 2 rows of YUV to ARGB.
+ ScaleARGBFilterCols(rowptr, src_row_y, dst_width, x, dx);
+ if (src_height > 1) {
+ src_row_y += src_stride_y;
+ if (yi & 1) {
+ src_row_u += src_stride_u;
+ src_row_v += src_stride_v;
+ }
+ }
+ ScaleARGBFilterCols(rowptr + rowstride, src_row_y, dst_width, x, dx);
+ if (src_height > 2) {
+ src_row_y += src_stride_y;
+ if (!(yi & 1)) {
+ src_row_u += src_stride_u;
+ src_row_v += src_stride_v;
+ }
+ }
+
+ for (j = 0; j < dst_height; ++j) {
+ yi = y >> 16;
+ if (yi != lasty) {
+ if (y > max_y) {
+ y = max_y;
+ yi = y >> 16;
+ uv_yi = yi >> kYShift;
+ src_row_y = src_y + yi * src_stride_y;
+ src_row_u = src_u + uv_yi * src_stride_u;
+ src_row_v = src_v + uv_yi * src_stride_v;
+ }
+ if (yi != lasty) {
+ // TODO(fbarchard): Convert the clipped region of row.
+ I422ToARGBRow(src_row_y, src_row_u, src_row_v, argb_row, src_width);
+ ScaleARGBFilterCols(rowptr, argb_row, dst_width, x, dx);
+ rowptr += rowstride;
+ rowstride = -rowstride;
+ lasty = yi;
+ src_row_y += src_stride_y;
+ if (yi & 1) {
+ src_row_u += src_stride_u;
+ src_row_v += src_stride_v;
+ }
+ }
+ }
+ if (filtering == kFilterLinear) {
+ InterpolateRow(dst_argb, rowptr, 0, dst_width * 4, 0);
+ } else {
+ int yf = (y >> 8) & 255;
+ InterpolateRow(dst_argb, rowptr, rowstride, dst_width * 4, yf);
+ }
+ dst_argb += dst_stride_argb;
+ y += dy;
+ }
+ free_aligned_buffer_64(row);
+  free_aligned_buffer_64(argb_row);
+}
+#endif
+
+// Scale ARGB to/from any dimensions, without interpolation.
+// Fixed-point math is used for performance: the upper 16 bits
+// of x and dx are the integer part of the source position and
+// the lower 16 bits are the fixed-point fraction.
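+//
+// Worked example (illustrative): scaling 640 source columns to 256 uses
+// dx = (640 << 16) / 256 = 163840 (2.5 in 16.16), so successive x >> 16
+// values pick source pixels 0, 2, 5, 7, 10, ...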
+
+static void ScaleARGBSimple(int src_width, int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_argb, uint8* dst_argb,
+ int x, int dx, int y, int dy) {
+ int j;
+ void (*ScaleARGBCols)(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) =
+ (src_width >= 32768) ? ScaleARGBCols64_C : ScaleARGBCols_C;
+#if defined(HAS_SCALEARGBCOLS_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && src_width < 32768) {
+ ScaleARGBCols = ScaleARGBCols_SSE2;
+ }
+#endif
+#if defined(HAS_SCALEARGBCOLS_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ ScaleARGBCols = ScaleARGBCols_Any_NEON;
+ if (IS_ALIGNED(dst_width, 8)) {
+ ScaleARGBCols = ScaleARGBCols_NEON;
+ }
+ }
+#endif
+ if (src_width * 2 == dst_width && x < 0x8000) {
+ ScaleARGBCols = ScaleARGBColsUp2_C;
+#if defined(HAS_SCALEARGBCOLSUP2_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2) && IS_ALIGNED(dst_width, 8)) {
+ ScaleARGBCols = ScaleARGBColsUp2_SSE2;
+ }
+#endif
+ }
+
+ for (j = 0; j < dst_height; ++j) {
+ ScaleARGBCols(dst_argb, src_argb + (y >> 16) * src_stride,
+ dst_width, x, dx);
+ dst_argb += dst_stride;
+ y += dy;
+ }
+}
+
+// Scale an ARGB image.
+// This function in turn calls a scaling function
+// suitable for handling the desired resolutions.
+static void ScaleARGB(const uint8* src, int src_stride,
+ int src_width, int src_height,
+ uint8* dst, int dst_stride,
+ int dst_width, int dst_height,
+ int clip_x, int clip_y, int clip_width, int clip_height,
+ enum FilterMode filtering) {
+ // Initial source x/y coordinate and step values as 16.16 fixed point.
+ int x = 0;
+ int y = 0;
+ int dx = 0;
+ int dy = 0;
+ // ARGB does not support box filter yet, but allow the user to pass it.
+ // Simplify filtering when possible.
+ filtering = ScaleFilterReduce(src_width, src_height,
+ dst_width, dst_height,
+ filtering);
+
+ // Negative src_height means invert the image.
+ if (src_height < 0) {
+ src_height = -src_height;
+ src = src + (src_height - 1) * src_stride;
+ src_stride = -src_stride;
+ }
+ ScaleSlope(src_width, src_height, dst_width, dst_height, filtering,
+ &x, &y, &dx, &dy);
+ src_width = Abs(src_width);
+ if (clip_x) {
+ int64 clipf = (int64)(clip_x) * dx;
+ x += (clipf & 0xffff);
+ src += (clipf >> 16) * 4;
+ dst += clip_x * 4;
+ }
+ if (clip_y) {
+ int64 clipf = (int64)(clip_y) * dy;
+ y += (clipf & 0xffff);
+ src += (clipf >> 16) * src_stride;
+ dst += clip_y * dst_stride;
+ }
+
+ // Special case for integer step values.
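+  // (dx | dy) having no fractional bits means both steps are exact
+  // integers; e.g. dx == 0x20000 is an exact 2x horizontal step.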
+ if (((dx | dy) & 0xffff) == 0) {
+ if (!dx || !dy) { // 1 pixel wide and/or tall.
+ filtering = kFilterNone;
+ } else {
+      // Optimized even scale down, i.e. 2, 4, 6, 8, 10x.
+ if (!(dx & 0x10000) && !(dy & 0x10000)) {
+ if (dx == 0x20000) {
+ // Optimized 1/2 downsample.
+ ScaleARGBDown2(src_width, src_height,
+ clip_width, clip_height,
+ src_stride, dst_stride, src, dst,
+ x, dx, y, dy, filtering);
+ return;
+ }
+ if (dx == 0x40000 && filtering == kFilterBox) {
+ // Optimized 1/4 box downsample.
+ ScaleARGBDown4Box(src_width, src_height,
+ clip_width, clip_height,
+ src_stride, dst_stride, src, dst,
+ x, dx, y, dy);
+ return;
+ }
+ ScaleARGBDownEven(src_width, src_height,
+ clip_width, clip_height,
+ src_stride, dst_stride, src, dst,
+ x, dx, y, dy, filtering);
+ return;
+ }
+      // Optimized odd scale down, i.e. 3, 5, 7, 9x.
+ if ((dx & 0x10000) && (dy & 0x10000)) {
+ filtering = kFilterNone;
+ if (dx == 0x10000 && dy == 0x10000) {
+ // Straight copy.
+ ARGBCopy(src + (y >> 16) * src_stride + (x >> 16) * 4, src_stride,
+ dst, dst_stride, clip_width, clip_height);
+ return;
+ }
+ }
+ }
+ }
+ if (dx == 0x10000 && (x & 0xffff) == 0) {
+    // Arbitrary scale vertically, but unscaled horizontally.
+ ScalePlaneVertical(src_height,
+ clip_width, clip_height,
+ src_stride, dst_stride, src, dst,
+ x, y, dy, 4, filtering);
+ return;
+ }
+ if (filtering && dy < 65536) {
+ ScaleARGBBilinearUp(src_width, src_height,
+ clip_width, clip_height,
+ src_stride, dst_stride, src, dst,
+ x, dx, y, dy, filtering);
+ return;
+ }
+ if (filtering) {
+ ScaleARGBBilinearDown(src_width, src_height,
+ clip_width, clip_height,
+ src_stride, dst_stride, src, dst,
+ x, dx, y, dy, filtering);
+ return;
+ }
+ ScaleARGBSimple(src_width, src_height, clip_width, clip_height,
+ src_stride, dst_stride, src, dst,
+ x, dx, y, dy);
+}
+
+LIBYUV_API
+int ARGBScaleClip(const uint8* src_argb, int src_stride_argb,
+ int src_width, int src_height,
+ uint8* dst_argb, int dst_stride_argb,
+ int dst_width, int dst_height,
+ int clip_x, int clip_y, int clip_width, int clip_height,
+ enum FilterMode filtering) {
+ if (!src_argb || src_width == 0 || src_height == 0 ||
+ !dst_argb || dst_width <= 0 || dst_height <= 0 ||
+ clip_x < 0 || clip_y < 0 ||
+ clip_width > 32768 || clip_height > 32768 ||
+ (clip_x + clip_width) > dst_width ||
+ (clip_y + clip_height) > dst_height) {
+ return -1;
+ }
+ ScaleARGB(src_argb, src_stride_argb, src_width, src_height,
+ dst_argb, dst_stride_argb, dst_width, dst_height,
+ clip_x, clip_y, clip_width, clip_height, filtering);
+ return 0;
+}
+
+// Scale an ARGB image.
+LIBYUV_API
+int ARGBScale(const uint8* src_argb, int src_stride_argb,
+ int src_width, int src_height,
+ uint8* dst_argb, int dst_stride_argb,
+ int dst_width, int dst_height,
+ enum FilterMode filtering) {
+ if (!src_argb || src_width == 0 || src_height == 0 ||
+ src_width > 32768 || src_height > 32768 ||
+ !dst_argb || dst_width <= 0 || dst_height <= 0) {
+ return -1;
+ }
+ ScaleARGB(src_argb, src_stride_argb, src_width, src_height,
+ dst_argb, dst_stride_argb, dst_width, dst_height,
+ 0, 0, dst_width, dst_height, filtering);
+ return 0;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/scale_common.cc b/media/libaom/src/third_party/libyuv/source/scale_common.cc
new file mode 100644
index 000000000..1711f3d54
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale_common.cc
@@ -0,0 +1,1137 @@
+/*
+ * Copyright 2013 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/scale.h"
+
+#include <assert.h>
+#include <string.h>
+
+#include "libyuv/cpu_id.h"
+#include "libyuv/planar_functions.h" // For CopyARGB
+#include "libyuv/row.h"
+#include "libyuv/scale_row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+static __inline int Abs(int v) {
+ return v >= 0 ? v : -v;
+}
+
+// CPU-agnostic row functions.
+void ScaleRowDown2_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = src_ptr[1];
+ dst[1] = src_ptr[3];
+ dst += 2;
+ src_ptr += 4;
+ }
+ if (dst_width & 1) {
+ dst[0] = src_ptr[1];
+ }
+}
+
+void ScaleRowDown2_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width) {
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = src_ptr[1];
+ dst[1] = src_ptr[3];
+ dst += 2;
+ src_ptr += 4;
+ }
+ if (dst_width & 1) {
+ dst[0] = src_ptr[1];
+ }
+}
+
+void ScaleRowDown2Linear_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ const uint8* s = src_ptr;
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = (s[0] + s[1] + 1) >> 1;
+ dst[1] = (s[2] + s[3] + 1) >> 1;
+ dst += 2;
+ s += 4;
+ }
+ if (dst_width & 1) {
+ dst[0] = (s[0] + s[1] + 1) >> 1;
+ }
+}
+
+void ScaleRowDown2Linear_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width) {
+ const uint16* s = src_ptr;
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = (s[0] + s[1] + 1) >> 1;
+ dst[1] = (s[2] + s[3] + 1) >> 1;
+ dst += 2;
+ s += 4;
+ }
+ if (dst_width & 1) {
+ dst[0] = (s[0] + s[1] + 1) >> 1;
+ }
+}
+
+void ScaleRowDown2Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ const uint8* s = src_ptr;
+ const uint8* t = src_ptr + src_stride;
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = (s[0] + s[1] + t[0] + t[1] + 2) >> 2;
+ dst[1] = (s[2] + s[3] + t[2] + t[3] + 2) >> 2;
+ dst += 2;
+ s += 4;
+ t += 4;
+ }
+ if (dst_width & 1) {
+ dst[0] = (s[0] + s[1] + t[0] + t[1] + 2) >> 2;
+ }
+}
+
+void ScaleRowDown2Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width) {
+ const uint16* s = src_ptr;
+ const uint16* t = src_ptr + src_stride;
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = (s[0] + s[1] + t[0] + t[1] + 2) >> 2;
+ dst[1] = (s[2] + s[3] + t[2] + t[3] + 2) >> 2;
+ dst += 2;
+ s += 4;
+ t += 4;
+ }
+ if (dst_width & 1) {
+ dst[0] = (s[0] + s[1] + t[0] + t[1] + 2) >> 2;
+ }
+}
+
+void ScaleRowDown4_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = src_ptr[2];
+ dst[1] = src_ptr[6];
+ dst += 2;
+ src_ptr += 8;
+ }
+ if (dst_width & 1) {
+ dst[0] = src_ptr[2];
+ }
+}
+
+void ScaleRowDown4_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width) {
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = src_ptr[2];
+ dst[1] = src_ptr[6];
+ dst += 2;
+ src_ptr += 8;
+ }
+ if (dst_width & 1) {
+ dst[0] = src_ptr[2];
+ }
+}
+
+void ScaleRowDown4Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ intptr_t stride = src_stride;
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] + src_ptr[3] +
+ src_ptr[stride + 0] + src_ptr[stride + 1] +
+ src_ptr[stride + 2] + src_ptr[stride + 3] +
+ src_ptr[stride * 2 + 0] + src_ptr[stride * 2 + 1] +
+ src_ptr[stride * 2 + 2] + src_ptr[stride * 2 + 3] +
+ src_ptr[stride * 3 + 0] + src_ptr[stride * 3 + 1] +
+ src_ptr[stride * 3 + 2] + src_ptr[stride * 3 + 3] +
+ 8) >> 4;
+ dst[1] = (src_ptr[4] + src_ptr[5] + src_ptr[6] + src_ptr[7] +
+ src_ptr[stride + 4] + src_ptr[stride + 5] +
+ src_ptr[stride + 6] + src_ptr[stride + 7] +
+ src_ptr[stride * 2 + 4] + src_ptr[stride * 2 + 5] +
+ src_ptr[stride * 2 + 6] + src_ptr[stride * 2 + 7] +
+ src_ptr[stride * 3 + 4] + src_ptr[stride * 3 + 5] +
+ src_ptr[stride * 3 + 6] + src_ptr[stride * 3 + 7] +
+ 8) >> 4;
+ dst += 2;
+ src_ptr += 8;
+ }
+ if (dst_width & 1) {
+ dst[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] + src_ptr[3] +
+ src_ptr[stride + 0] + src_ptr[stride + 1] +
+ src_ptr[stride + 2] + src_ptr[stride + 3] +
+ src_ptr[stride * 2 + 0] + src_ptr[stride * 2 + 1] +
+ src_ptr[stride * 2 + 2] + src_ptr[stride * 2 + 3] +
+ src_ptr[stride * 3 + 0] + src_ptr[stride * 3 + 1] +
+ src_ptr[stride * 3 + 2] + src_ptr[stride * 3 + 3] +
+ 8) >> 4;
+ }
+}
+
+void ScaleRowDown4Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width) {
+ intptr_t stride = src_stride;
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] + src_ptr[3] +
+ src_ptr[stride + 0] + src_ptr[stride + 1] +
+ src_ptr[stride + 2] + src_ptr[stride + 3] +
+ src_ptr[stride * 2 + 0] + src_ptr[stride * 2 + 1] +
+ src_ptr[stride * 2 + 2] + src_ptr[stride * 2 + 3] +
+ src_ptr[stride * 3 + 0] + src_ptr[stride * 3 + 1] +
+ src_ptr[stride * 3 + 2] + src_ptr[stride * 3 + 3] +
+ 8) >> 4;
+ dst[1] = (src_ptr[4] + src_ptr[5] + src_ptr[6] + src_ptr[7] +
+ src_ptr[stride + 4] + src_ptr[stride + 5] +
+ src_ptr[stride + 6] + src_ptr[stride + 7] +
+ src_ptr[stride * 2 + 4] + src_ptr[stride * 2 + 5] +
+ src_ptr[stride * 2 + 6] + src_ptr[stride * 2 + 7] +
+ src_ptr[stride * 3 + 4] + src_ptr[stride * 3 + 5] +
+ src_ptr[stride * 3 + 6] + src_ptr[stride * 3 + 7] +
+ 8) >> 4;
+ dst += 2;
+ src_ptr += 8;
+ }
+ if (dst_width & 1) {
+ dst[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] + src_ptr[3] +
+ src_ptr[stride + 0] + src_ptr[stride + 1] +
+ src_ptr[stride + 2] + src_ptr[stride + 3] +
+ src_ptr[stride * 2 + 0] + src_ptr[stride * 2 + 1] +
+ src_ptr[stride * 2 + 2] + src_ptr[stride * 2 + 3] +
+ src_ptr[stride * 3 + 0] + src_ptr[stride * 3 + 1] +
+ src_ptr[stride * 3 + 2] + src_ptr[stride * 3 + 3] +
+ 8) >> 4;
+ }
+}
+
+void ScaleRowDown34_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ int x;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (x = 0; x < dst_width; x += 3) {
+ dst[0] = src_ptr[0];
+ dst[1] = src_ptr[1];
+ dst[2] = src_ptr[3];
+ dst += 3;
+ src_ptr += 4;
+ }
+}
+
+void ScaleRowDown34_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width) {
+ int x;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (x = 0; x < dst_width; x += 3) {
+ dst[0] = src_ptr[0];
+ dst[1] = src_ptr[1];
+ dst[2] = src_ptr[3];
+ dst += 3;
+ src_ptr += 4;
+ }
+}
+
+// Filter rows 0 and 1 together, 3 : 1
+void ScaleRowDown34_0_Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width) {
+ const uint8* s = src_ptr;
+ const uint8* t = src_ptr + src_stride;
+ int x;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (x = 0; x < dst_width; x += 3) {
+ uint8 a0 = (s[0] * 3 + s[1] * 1 + 2) >> 2;
+ uint8 a1 = (s[1] * 1 + s[2] * 1 + 1) >> 1;
+ uint8 a2 = (s[2] * 1 + s[3] * 3 + 2) >> 2;
+ uint8 b0 = (t[0] * 3 + t[1] * 1 + 2) >> 2;
+ uint8 b1 = (t[1] * 1 + t[2] * 1 + 1) >> 1;
+ uint8 b2 = (t[2] * 1 + t[3] * 3 + 2) >> 2;
+ d[0] = (a0 * 3 + b0 + 2) >> 2;
+ d[1] = (a1 * 3 + b1 + 2) >> 2;
+ d[2] = (a2 * 3 + b2 + 2) >> 2;
+ d += 3;
+ s += 4;
+ t += 4;
+ }
+}
+
+void ScaleRowDown34_0_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* d, int dst_width) {
+ const uint16* s = src_ptr;
+ const uint16* t = src_ptr + src_stride;
+ int x;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (x = 0; x < dst_width; x += 3) {
+ uint16 a0 = (s[0] * 3 + s[1] * 1 + 2) >> 2;
+ uint16 a1 = (s[1] * 1 + s[2] * 1 + 1) >> 1;
+ uint16 a2 = (s[2] * 1 + s[3] * 3 + 2) >> 2;
+ uint16 b0 = (t[0] * 3 + t[1] * 1 + 2) >> 2;
+ uint16 b1 = (t[1] * 1 + t[2] * 1 + 1) >> 1;
+ uint16 b2 = (t[2] * 1 + t[3] * 3 + 2) >> 2;
+ d[0] = (a0 * 3 + b0 + 2) >> 2;
+ d[1] = (a1 * 3 + b1 + 2) >> 2;
+ d[2] = (a2 * 3 + b2 + 2) >> 2;
+ d += 3;
+ s += 4;
+ t += 4;
+ }
+}
+
+// Filter rows 1 and 2 together, 1 : 1
+void ScaleRowDown34_1_Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width) {
+ const uint8* s = src_ptr;
+ const uint8* t = src_ptr + src_stride;
+ int x;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (x = 0; x < dst_width; x += 3) {
+ uint8 a0 = (s[0] * 3 + s[1] * 1 + 2) >> 2;
+ uint8 a1 = (s[1] * 1 + s[2] * 1 + 1) >> 1;
+ uint8 a2 = (s[2] * 1 + s[3] * 3 + 2) >> 2;
+ uint8 b0 = (t[0] * 3 + t[1] * 1 + 2) >> 2;
+ uint8 b1 = (t[1] * 1 + t[2] * 1 + 1) >> 1;
+ uint8 b2 = (t[2] * 1 + t[3] * 3 + 2) >> 2;
+ d[0] = (a0 + b0 + 1) >> 1;
+ d[1] = (a1 + b1 + 1) >> 1;
+ d[2] = (a2 + b2 + 1) >> 1;
+ d += 3;
+ s += 4;
+ t += 4;
+ }
+}
+
+void ScaleRowDown34_1_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* d, int dst_width) {
+ const uint16* s = src_ptr;
+ const uint16* t = src_ptr + src_stride;
+ int x;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (x = 0; x < dst_width; x += 3) {
+ uint16 a0 = (s[0] * 3 + s[1] * 1 + 2) >> 2;
+ uint16 a1 = (s[1] * 1 + s[2] * 1 + 1) >> 1;
+ uint16 a2 = (s[2] * 1 + s[3] * 3 + 2) >> 2;
+ uint16 b0 = (t[0] * 3 + t[1] * 1 + 2) >> 2;
+ uint16 b1 = (t[1] * 1 + t[2] * 1 + 1) >> 1;
+ uint16 b2 = (t[2] * 1 + t[3] * 3 + 2) >> 2;
+ d[0] = (a0 + b0 + 1) >> 1;
+ d[1] = (a1 + b1 + 1) >> 1;
+ d[2] = (a2 + b2 + 1) >> 1;
+ d += 3;
+ s += 4;
+ t += 4;
+ }
+}
+
+// Scales a single row of pixels using point sampling.
+void ScaleCols_C(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ dst_ptr[0] = src_ptr[x >> 16];
+ x += dx;
+ dst_ptr[1] = src_ptr[x >> 16];
+ x += dx;
+ dst_ptr += 2;
+ }
+ if (dst_width & 1) {
+ dst_ptr[0] = src_ptr[x >> 16];
+ }
+}
+
+void ScaleCols_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx) {
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ dst_ptr[0] = src_ptr[x >> 16];
+ x += dx;
+ dst_ptr[1] = src_ptr[x >> 16];
+ x += dx;
+ dst_ptr += 2;
+ }
+ if (dst_width & 1) {
+ dst_ptr[0] = src_ptr[x >> 16];
+ }
+}
+
+// Scales a single row of pixels up by 2x using point sampling.
+void ScaleColsUp2_C(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ dst_ptr[1] = dst_ptr[0] = src_ptr[0];
+ src_ptr += 1;
+ dst_ptr += 2;
+ }
+ if (dst_width & 1) {
+ dst_ptr[0] = src_ptr[0];
+ }
+}
+
+void ScaleColsUp2_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx) {
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ dst_ptr[1] = dst_ptr[0] = src_ptr[0];
+ src_ptr += 1;
+ dst_ptr += 2;
+ }
+ if (dst_width & 1) {
+ dst_ptr[0] = src_ptr[0];
+ }
+}
+
+// (1-f)a + fb can be replaced with a + f(b-a)
+#define BLENDER(a, b, f) (uint8)((int)(a) + \
+ ((int)(f) * ((int)(b) - (int)(a)) >> 16))
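+
+// Worked example (illustrative): BLENDER(100, 200, 0x8000) computes
+// 100 + ((0x8000 * 100) >> 16) = 150, the midpoint, since f = 0x8000 is
+// half of the 0x10000 fraction range.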
+
+void ScaleFilterCols_C(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ int xi = x >> 16;
+ int a = src_ptr[xi];
+ int b = src_ptr[xi + 1];
+ dst_ptr[0] = BLENDER(a, b, x & 0xffff);
+ x += dx;
+ xi = x >> 16;
+ a = src_ptr[xi];
+ b = src_ptr[xi + 1];
+ dst_ptr[1] = BLENDER(a, b, x & 0xffff);
+ x += dx;
+ dst_ptr += 2;
+ }
+ if (dst_width & 1) {
+ int xi = x >> 16;
+ int a = src_ptr[xi];
+ int b = src_ptr[xi + 1];
+ dst_ptr[0] = BLENDER(a, b, x & 0xffff);
+ }
+}
+
+void ScaleFilterCols64_C(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x32, int dx) {
+ int64 x = (int64)(x32);
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ int64 xi = x >> 16;
+ int a = src_ptr[xi];
+ int b = src_ptr[xi + 1];
+ dst_ptr[0] = BLENDER(a, b, x & 0xffff);
+ x += dx;
+ xi = x >> 16;
+ a = src_ptr[xi];
+ b = src_ptr[xi + 1];
+ dst_ptr[1] = BLENDER(a, b, x & 0xffff);
+ x += dx;
+ dst_ptr += 2;
+ }
+ if (dst_width & 1) {
+ int64 xi = x >> 16;
+ int a = src_ptr[xi];
+ int b = src_ptr[xi + 1];
+ dst_ptr[0] = BLENDER(a, b, x & 0xffff);
+ }
+}
+#undef BLENDER
+
+#define BLENDER(a, b, f) (uint16)((int)(a) + \
+ ((int)(f) * ((int)(b) - (int)(a)) >> 16))
+
+void ScaleFilterCols_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x, int dx) {
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ int xi = x >> 16;
+ int a = src_ptr[xi];
+ int b = src_ptr[xi + 1];
+ dst_ptr[0] = BLENDER(a, b, x & 0xffff);
+ x += dx;
+ xi = x >> 16;
+ a = src_ptr[xi];
+ b = src_ptr[xi + 1];
+ dst_ptr[1] = BLENDER(a, b, x & 0xffff);
+ x += dx;
+ dst_ptr += 2;
+ }
+ if (dst_width & 1) {
+ int xi = x >> 16;
+ int a = src_ptr[xi];
+ int b = src_ptr[xi + 1];
+ dst_ptr[0] = BLENDER(a, b, x & 0xffff);
+ }
+}
+
+void ScaleFilterCols64_16_C(uint16* dst_ptr, const uint16* src_ptr,
+ int dst_width, int x32, int dx) {
+ int64 x = (int64)(x32);
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ int64 xi = x >> 16;
+ int a = src_ptr[xi];
+ int b = src_ptr[xi + 1];
+ dst_ptr[0] = BLENDER(a, b, x & 0xffff);
+ x += dx;
+ xi = x >> 16;
+ a = src_ptr[xi];
+ b = src_ptr[xi + 1];
+ dst_ptr[1] = BLENDER(a, b, x & 0xffff);
+ x += dx;
+ dst_ptr += 2;
+ }
+ if (dst_width & 1) {
+ int64 xi = x >> 16;
+ int a = src_ptr[xi];
+ int b = src_ptr[xi + 1];
+ dst_ptr[0] = BLENDER(a, b, x & 0xffff);
+ }
+}
+#undef BLENDER
+
+void ScaleRowDown38_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ int x;
+ assert(dst_width % 3 == 0);
+ for (x = 0; x < dst_width; x += 3) {
+ dst[0] = src_ptr[0];
+ dst[1] = src_ptr[3];
+ dst[2] = src_ptr[6];
+ dst += 3;
+ src_ptr += 8;
+ }
+}
+
+void ScaleRowDown38_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst, int dst_width) {
+ int x;
+ assert(dst_width % 3 == 0);
+ for (x = 0; x < dst_width; x += 3) {
+ dst[0] = src_ptr[0];
+ dst[1] = src_ptr[3];
+ dst[2] = src_ptr[6];
+ dst += 3;
+ src_ptr += 8;
+ }
+}
+
+// 8x3 -> 3x1
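+// The multiply-and-shift by (65536 / 9) approximates division by 9 without
+// a hardware divide; e.g. a 9-pixel sum of 900 yields 900 * 7281 >> 16 = 99
+// (truncation can land one below the exact average). The 65536 / 6 and
+// 65536 / 4 factors used for the narrower column groups work the same way.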
+void ScaleRowDown38_3_Box_C(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ intptr_t stride = src_stride;
+ int i;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (i = 0; i < dst_width; i += 3) {
+ dst_ptr[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] +
+ src_ptr[stride + 0] + src_ptr[stride + 1] +
+ src_ptr[stride + 2] + src_ptr[stride * 2 + 0] +
+ src_ptr[stride * 2 + 1] + src_ptr[stride * 2 + 2]) *
+ (65536 / 9) >> 16;
+ dst_ptr[1] = (src_ptr[3] + src_ptr[4] + src_ptr[5] +
+ src_ptr[stride + 3] + src_ptr[stride + 4] +
+ src_ptr[stride + 5] + src_ptr[stride * 2 + 3] +
+ src_ptr[stride * 2 + 4] + src_ptr[stride * 2 + 5]) *
+ (65536 / 9) >> 16;
+ dst_ptr[2] = (src_ptr[6] + src_ptr[7] +
+ src_ptr[stride + 6] + src_ptr[stride + 7] +
+ src_ptr[stride * 2 + 6] + src_ptr[stride * 2 + 7]) *
+ (65536 / 6) >> 16;
+ src_ptr += 8;
+ dst_ptr += 3;
+ }
+}
+
+void ScaleRowDown38_3_Box_16_C(const uint16* src_ptr,
+ ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width) {
+ intptr_t stride = src_stride;
+ int i;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (i = 0; i < dst_width; i += 3) {
+ dst_ptr[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] +
+ src_ptr[stride + 0] + src_ptr[stride + 1] +
+ src_ptr[stride + 2] + src_ptr[stride * 2 + 0] +
+ src_ptr[stride * 2 + 1] + src_ptr[stride * 2 + 2]) *
+ (65536 / 9) >> 16;
+ dst_ptr[1] = (src_ptr[3] + src_ptr[4] + src_ptr[5] +
+ src_ptr[stride + 3] + src_ptr[stride + 4] +
+ src_ptr[stride + 5] + src_ptr[stride * 2 + 3] +
+ src_ptr[stride * 2 + 4] + src_ptr[stride * 2 + 5]) *
+ (65536 / 9) >> 16;
+ dst_ptr[2] = (src_ptr[6] + src_ptr[7] +
+ src_ptr[stride + 6] + src_ptr[stride + 7] +
+ src_ptr[stride * 2 + 6] + src_ptr[stride * 2 + 7]) *
+ (65536 / 6) >> 16;
+ src_ptr += 8;
+ dst_ptr += 3;
+ }
+}
+
+// 8x2 -> 3x1
+void ScaleRowDown38_2_Box_C(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ intptr_t stride = src_stride;
+ int i;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (i = 0; i < dst_width; i += 3) {
+ dst_ptr[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] +
+ src_ptr[stride + 0] + src_ptr[stride + 1] +
+ src_ptr[stride + 2]) * (65536 / 6) >> 16;
+ dst_ptr[1] = (src_ptr[3] + src_ptr[4] + src_ptr[5] +
+ src_ptr[stride + 3] + src_ptr[stride + 4] +
+ src_ptr[stride + 5]) * (65536 / 6) >> 16;
+ dst_ptr[2] = (src_ptr[6] + src_ptr[7] +
+ src_ptr[stride + 6] + src_ptr[stride + 7]) *
+ (65536 / 4) >> 16;
+ src_ptr += 8;
+ dst_ptr += 3;
+ }
+}
+
+void ScaleRowDown38_2_Box_16_C(const uint16* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int dst_width) {
+ intptr_t stride = src_stride;
+ int i;
+ assert((dst_width % 3 == 0) && (dst_width > 0));
+ for (i = 0; i < dst_width; i += 3) {
+ dst_ptr[0] = (src_ptr[0] + src_ptr[1] + src_ptr[2] +
+ src_ptr[stride + 0] + src_ptr[stride + 1] +
+ src_ptr[stride + 2]) * (65536 / 6) >> 16;
+ dst_ptr[1] = (src_ptr[3] + src_ptr[4] + src_ptr[5] +
+ src_ptr[stride + 3] + src_ptr[stride + 4] +
+ src_ptr[stride + 5]) * (65536 / 6) >> 16;
+ dst_ptr[2] = (src_ptr[6] + src_ptr[7] +
+ src_ptr[stride + 6] + src_ptr[stride + 7]) *
+ (65536 / 4) >> 16;
+ src_ptr += 8;
+ dst_ptr += 3;
+ }
+}
+
+void ScaleAddRow_C(const uint8* src_ptr, uint16* dst_ptr, int src_width) {
+ int x;
+ assert(src_width > 0);
+ for (x = 0; x < src_width - 1; x += 2) {
+ dst_ptr[0] += src_ptr[0];
+ dst_ptr[1] += src_ptr[1];
+ src_ptr += 2;
+ dst_ptr += 2;
+ }
+ if (src_width & 1) {
+ dst_ptr[0] += src_ptr[0];
+ }
+}
+
+void ScaleAddRow_16_C(const uint16* src_ptr, uint32* dst_ptr, int src_width) {
+ int x;
+ assert(src_width > 0);
+ for (x = 0; x < src_width - 1; x += 2) {
+ dst_ptr[0] += src_ptr[0];
+ dst_ptr[1] += src_ptr[1];
+ src_ptr += 2;
+ dst_ptr += 2;
+ }
+ if (src_width & 1) {
+ dst_ptr[0] += src_ptr[0];
+ }
+}
+
+void ScaleARGBRowDown2_C(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ const uint32* src = (const uint32*)(src_argb);
+ uint32* dst = (uint32*)(dst_argb);
+
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = src[1];
+ dst[1] = src[3];
+ src += 4;
+ dst += 2;
+ }
+ if (dst_width & 1) {
+ dst[0] = src[1];
+ }
+}
+
+void ScaleARGBRowDown2Linear_C(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ int x;
+ for (x = 0; x < dst_width; ++x) {
+ dst_argb[0] = (src_argb[0] + src_argb[4] + 1) >> 1;
+ dst_argb[1] = (src_argb[1] + src_argb[5] + 1) >> 1;
+ dst_argb[2] = (src_argb[2] + src_argb[6] + 1) >> 1;
+ dst_argb[3] = (src_argb[3] + src_argb[7] + 1) >> 1;
+ src_argb += 8;
+ dst_argb += 4;
+ }
+}
+
+void ScaleARGBRowDown2Box_C(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ int x;
+ for (x = 0; x < dst_width; ++x) {
+ dst_argb[0] = (src_argb[0] + src_argb[4] +
+ src_argb[src_stride] + src_argb[src_stride + 4] + 2) >> 2;
+ dst_argb[1] = (src_argb[1] + src_argb[5] +
+ src_argb[src_stride + 1] + src_argb[src_stride + 5] + 2) >> 2;
+ dst_argb[2] = (src_argb[2] + src_argb[6] +
+ src_argb[src_stride + 2] + src_argb[src_stride + 6] + 2) >> 2;
+ dst_argb[3] = (src_argb[3] + src_argb[7] +
+ src_argb[src_stride + 3] + src_argb[src_stride + 7] + 2) >> 2;
+ src_argb += 8;
+ dst_argb += 4;
+ }
+}
+
+void ScaleARGBRowDownEven_C(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width) {
+ const uint32* src = (const uint32*)(src_argb);
+ uint32* dst = (uint32*)(dst_argb);
+
+ int x;
+ for (x = 0; x < dst_width - 1; x += 2) {
+ dst[0] = src[0];
+ dst[1] = src[src_stepx];
+ src += src_stepx * 2;
+ dst += 2;
+ }
+ if (dst_width & 1) {
+ dst[0] = src[0];
+ }
+}
+
+void ScaleARGBRowDownEvenBox_C(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width) {
+ int x;
+ for (x = 0; x < dst_width; ++x) {
+ dst_argb[0] = (src_argb[0] + src_argb[4] +
+ src_argb[src_stride] + src_argb[src_stride + 4] + 2) >> 2;
+ dst_argb[1] = (src_argb[1] + src_argb[5] +
+ src_argb[src_stride + 1] + src_argb[src_stride + 5] + 2) >> 2;
+ dst_argb[2] = (src_argb[2] + src_argb[6] +
+ src_argb[src_stride + 2] + src_argb[src_stride + 6] + 2) >> 2;
+ dst_argb[3] = (src_argb[3] + src_argb[7] +
+ src_argb[src_stride + 3] + src_argb[src_stride + 7] + 2) >> 2;
+ src_argb += src_stepx * 4;
+ dst_argb += 4;
+ }
+}
+
+// Scales a single row of pixels using point sampling.
+void ScaleARGBCols_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ const uint32* src = (const uint32*)(src_argb);
+ uint32* dst = (uint32*)(dst_argb);
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ dst[0] = src[x >> 16];
+ x += dx;
+ dst[1] = src[x >> 16];
+ x += dx;
+ dst += 2;
+ }
+ if (dst_width & 1) {
+ dst[0] = src[x >> 16];
+ }
+}
+
+void ScaleARGBCols64_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x32, int dx) {
+ int64 x = (int64)(x32);
+ const uint32* src = (const uint32*)(src_argb);
+ uint32* dst = (uint32*)(dst_argb);
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ dst[0] = src[x >> 16];
+ x += dx;
+ dst[1] = src[x >> 16];
+ x += dx;
+ dst += 2;
+ }
+ if (dst_width & 1) {
+ dst[0] = src[x >> 16];
+ }
+}
+
+// Scales a single row of pixels up by 2x using point sampling.
+void ScaleARGBColsUp2_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ const uint32* src = (const uint32*)(src_argb);
+ uint32* dst = (uint32*)(dst_argb);
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ dst[1] = dst[0] = src[0];
+ src += 1;
+ dst += 2;
+ }
+ if (dst_width & 1) {
+ dst[0] = src[0];
+ }
+}
+
+// Mimics SSSE3 blender
+#define BLENDER1(a, b, f) ((a) * (0x7f ^ f) + (b) * f) >> 7
+#define BLENDERC(a, b, f, s) (uint32)( \
+ BLENDER1(((a) >> s) & 255, ((b) >> s) & 255, f) << s)
+#define BLENDER(a, b, f) \
+ BLENDERC(a, b, f, 24) | BLENDERC(a, b, f, 16) | \
+ BLENDERC(a, b, f, 8) | BLENDERC(a, b, f, 0)
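+// For example, f = 0x40 gives (63 * a + 64 * b) >> 7 per channel, roughly the
+// midpoint of the two source pixels, while f = 0 weights pixel a by 127/128.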
+
+void ScaleARGBFilterCols_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ const uint32* src = (const uint32*)(src_argb);
+ uint32* dst = (uint32*)(dst_argb);
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ int xi = x >> 16;
+ int xf = (x >> 9) & 0x7f;
+ uint32 a = src[xi];
+ uint32 b = src[xi + 1];
+ dst[0] = BLENDER(a, b, xf);
+ x += dx;
+ xi = x >> 16;
+ xf = (x >> 9) & 0x7f;
+ a = src[xi];
+ b = src[xi + 1];
+ dst[1] = BLENDER(a, b, xf);
+ x += dx;
+ dst += 2;
+ }
+ if (dst_width & 1) {
+ int xi = x >> 16;
+ int xf = (x >> 9) & 0x7f;
+ uint32 a = src[xi];
+ uint32 b = src[xi + 1];
+ dst[0] = BLENDER(a, b, xf);
+ }
+}
+
+void ScaleARGBFilterCols64_C(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x32, int dx) {
+ int64 x = (int64)(x32);
+ const uint32* src = (const uint32*)(src_argb);
+ uint32* dst = (uint32*)(dst_argb);
+ int j;
+ for (j = 0; j < dst_width - 1; j += 2) {
+ int64 xi = x >> 16;
+ int xf = (x >> 9) & 0x7f;
+ uint32 a = src[xi];
+ uint32 b = src[xi + 1];
+ dst[0] = BLENDER(a, b, xf);
+ x += dx;
+ xi = x >> 16;
+ xf = (x >> 9) & 0x7f;
+ a = src[xi];
+ b = src[xi + 1];
+ dst[1] = BLENDER(a, b, xf);
+ x += dx;
+ dst += 2;
+ }
+ if (dst_width & 1) {
+ int64 xi = x >> 16;
+ int xf = (x >> 9) & 0x7f;
+ uint32 a = src[xi];
+ uint32 b = src[xi + 1];
+ dst[0] = BLENDER(a, b, xf);
+ }
+}
+#undef BLENDER1
+#undef BLENDERC
+#undef BLENDER
+
+// Scale plane vertically with bilinear interpolation.
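+// y and dy are 16.16 fixed point. max_y clamps y so that yi = y >> 16 stays
+// at most src_height - 2 when src_height > 1, keeping row yi + 1 readable.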
+void ScalePlaneVertical(int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint8* src_argb, uint8* dst_argb,
+ int x, int y, int dy,
+ int bpp, enum FilterMode filtering) {
+ // TODO(fbarchard): Allow higher bpp.
+ int dst_width_bytes = dst_width * bpp;
+ void (*InterpolateRow)(uint8* dst_argb, const uint8* src_argb,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_C;
+ const int max_y = (src_height > 1) ? ((src_height - 1) << 16) - 1 : 0;
+ int j;
+ assert(bpp >= 1 && bpp <= 4);
+ assert(src_height != 0);
+ assert(dst_width > 0);
+ assert(dst_height > 0);
+ src_argb += (x >> 16) * bpp;
+#if defined(HAS_INTERPOLATEROW_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_SSE2;
+ if (IS_ALIGNED(dst_width_bytes, 16)) {
+ InterpolateRow = InterpolateRow_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_SSSE3;
+ if (IS_ALIGNED(dst_width_bytes, 16)) {
+ InterpolateRow = InterpolateRow_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_AVX2;
+ if (IS_ALIGNED(dst_width_bytes, 32)) {
+ InterpolateRow = InterpolateRow_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_NEON;
+ if (IS_ALIGNED(dst_width_bytes, 16)) {
+ InterpolateRow = InterpolateRow_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) {
+ InterpolateRow = InterpolateRow_Any_MIPS_DSPR2;
+ if (IS_ALIGNED(dst_width_bytes, 4)) {
+ InterpolateRow = InterpolateRow_MIPS_DSPR2;
+ }
+ }
+#endif
+ for (j = 0; j < dst_height; ++j) {
+ int yi;
+ int yf;
+ if (y > max_y) {
+ y = max_y;
+ }
+ yi = y >> 16;
+ yf = filtering ? ((y >> 8) & 255) : 0;
+ InterpolateRow(dst_argb, src_argb + yi * src_stride,
+ src_stride, dst_width_bytes, yf);
+ dst_argb += dst_stride;
+ y += dy;
+ }
+}
+
+void ScalePlaneVertical_16(int src_height,
+ int dst_width, int dst_height,
+ int src_stride, int dst_stride,
+ const uint16* src_argb, uint16* dst_argb,
+ int x, int y, int dy,
+ int wpp, enum FilterMode filtering) {
+ // TODO(fbarchard): Allow higher wpp.
+ int dst_width_words = dst_width * wpp;
+ void (*InterpolateRow)(uint16* dst_argb, const uint16* src_argb,
+ ptrdiff_t src_stride, int dst_width, int source_y_fraction) =
+ InterpolateRow_16_C;
+ const int max_y = (src_height > 1) ? ((src_height - 1) << 16) - 1 : 0;
+ int j;
+ assert(wpp >= 1 && wpp <= 2);
+ assert(src_height != 0);
+ assert(dst_width > 0);
+ assert(dst_height > 0);
+ src_argb += (x >> 16) * wpp;
+#if defined(HAS_INTERPOLATEROW_16_SSE2)
+ if (TestCpuFlag(kCpuHasSSE2)) {
+ InterpolateRow = InterpolateRow_Any_16_SSE2;
+ if (IS_ALIGNED(dst_width_words, 16)) {
+ InterpolateRow = InterpolateRow_16_SSE2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_SSSE3)
+ if (TestCpuFlag(kCpuHasSSSE3)) {
+ InterpolateRow = InterpolateRow_Any_16_SSSE3;
+ if (IS_ALIGNED(dst_width_words, 16)) {
+ InterpolateRow = InterpolateRow_16_SSSE3;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_AVX2)
+ if (TestCpuFlag(kCpuHasAVX2)) {
+ InterpolateRow = InterpolateRow_Any_16_AVX2;
+ if (IS_ALIGNED(dst_width_words, 32)) {
+ InterpolateRow = InterpolateRow_16_AVX2;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_NEON)
+ if (TestCpuFlag(kCpuHasNEON)) {
+ InterpolateRow = InterpolateRow_Any_16_NEON;
+ if (IS_ALIGNED(dst_width_words, 16)) {
+ InterpolateRow = InterpolateRow_16_NEON;
+ }
+ }
+#endif
+#if defined(HAS_INTERPOLATEROW_16_MIPS_DSPR2)
+ if (TestCpuFlag(kCpuHasMIPS_DSPR2) &&
+ IS_ALIGNED(src_argb, 4) && IS_ALIGNED(src_stride, 4) &&
+ IS_ALIGNED(dst_argb, 4) && IS_ALIGNED(dst_stride, 4)) {
+ InterpolateRow = InterpolateRow_Any_16_MIPS_DSPR2;
+ if (IS_ALIGNED(dst_width_words, 4)) {
+ InterpolateRow = InterpolateRow_16_MIPS_DSPR2;
+ }
+ }
+#endif
+ for (j = 0; j < dst_height; ++j) {
+ int yi;
+ int yf;
+ if (y > max_y) {
+ y = max_y;
+ }
+ yi = y >> 16;
+ yf = filtering ? ((y >> 8) & 255) : 0;
+ InterpolateRow(dst_argb, src_argb + yi * src_stride,
+ src_stride, dst_width_words, yf);
+ dst_argb += dst_stride;
+ y += dy;
+ }
+}
+
+// Simplify the filtering based on scale factors.
+enum FilterMode ScaleFilterReduce(int src_width, int src_height,
+ int dst_width, int dst_height,
+ enum FilterMode filtering) {
+ if (src_width < 0) {
+ src_width = -src_width;
+ }
+ if (src_height < 0) {
+ src_height = -src_height;
+ }
+ if (filtering == kFilterBox) {
+ // If scaling both axes to 0.5 or larger, switch from Box to Bilinear.
+ if (dst_width * 2 >= src_width && dst_height * 2 >= src_height) {
+ filtering = kFilterBilinear;
+ }
+ }
+ if (filtering == kFilterBilinear) {
+ if (src_height == 1) {
+ filtering = kFilterLinear;
+ }
+ // TODO(fbarchard): Detect any odd scale factor and reduce to Linear.
+ if (dst_height == src_height || dst_height * 3 == src_height) {
+ filtering = kFilterLinear;
+ }
+ // TODO(fbarchard): Remove the 1 pixel wide filter restriction, which
+ // exists to avoid reading 2 pixels horizontally, which can cause a
+ // memory exception.
+ if (src_width == 1) {
+ filtering = kFilterNone;
+ }
+ }
+ if (filtering == kFilterLinear) {
+ if (src_width == 1) {
+ filtering = kFilterNone;
+ }
+ // TODO(fbarchard): Detect any odd scale factor and reduce to None.
+ if (dst_width == src_width || dst_width * 3 == src_width) {
+ filtering = kFilterNone;
+ }
+ }
+ return filtering;
+}
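+// Example: a 1920x1080 -> 960x540 request with kFilterBox reduces to
+// kFilterBilinear (both axes scale by exactly 0.5), while 1920x1080 ->
+// 480x270 keeps kFilterBox.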
+
+// Divide num by div and return as 16.16 fixed point result.
+int FixedDiv_C(int num, int div) {
+ return (int)(((int64)(num) << 16) / div);
+}
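+// For example, FixedDiv_C(1, 2) = (1 << 16) / 2 = 0x8000, i.e. 0.5 in 16.16
+// fixed point.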
+
+// Divide num - 1 by div - 1 and return as 16.16 fixed point result.
+int FixedDiv1_C(int num, int div) {
+ return (int)((((int64)(num) << 16) - 0x00010001) /
+ (div - 1));
+}
+
+#define CENTERSTART(dx, s) (dx < 0) ? -((-dx >> 1) + s) : ((dx >> 1) + s)
+
+// Compute slope values for stepping.
+void ScaleSlope(int src_width, int src_height,
+ int dst_width, int dst_height,
+ enum FilterMode filtering,
+ int* x, int* y, int* dx, int* dy) {
+ assert(x != NULL);
+ assert(y != NULL);
+ assert(dx != NULL);
+ assert(dy != NULL);
+ assert(src_width != 0);
+ assert(src_height != 0);
+ assert(dst_width > 0);
+ assert(dst_height > 0);
+ // Check for 1 pixel and avoid FixedDiv overflow.
+ if (dst_width == 1 && src_width >= 32768) {
+ dst_width = src_width;
+ }
+ if (dst_height == 1 && src_height >= 32768) {
+ dst_height = src_height;
+ }
+ if (filtering == kFilterBox) {
+ // Box filter uses the plain ratio step and starts at 0 (no centering).
+ *dx = FixedDiv(Abs(src_width), dst_width);
+ *dy = FixedDiv(src_height, dst_height);
+ *x = 0;
+ *y = 0;
+ } else if (filtering == kFilterBilinear) {
+ // Scale step for bilinear sampling renders last pixel once for upsample.
+ if (dst_width <= Abs(src_width)) {
+ *dx = FixedDiv(Abs(src_width), dst_width);
+ *x = CENTERSTART(*dx, -32768); // Subtract 0.5 (32768) to center filter.
+ } else if (dst_width > 1) {
+ *dx = FixedDiv1(Abs(src_width), dst_width);
+ *x = 0;
+ }
+ if (dst_height <= src_height) {
+ *dy = FixedDiv(src_height, dst_height);
+ *y = CENTERSTART(*dy, -32768); // Subtract 0.5 (32768) to center filter.
+ } else if (dst_height > 1) {
+ *dy = FixedDiv1(src_height, dst_height);
+ *y = 0;
+ }
+ } else if (filtering == kFilterLinear) {
+ // Scale step for bilinear sampling renders last pixel once for upsample.
+ if (dst_width <= Abs(src_width)) {
+ *dx = FixedDiv(Abs(src_width), dst_width);
+ *x = CENTERSTART(*dx, -32768); // Subtract 0.5 (32768) to center filter.
+ } else if (dst_width > 1) {
+ *dx = FixedDiv1(Abs(src_width), dst_width);
+ *x = 0;
+ }
+ *dy = FixedDiv(src_height, dst_height);
+ *y = *dy >> 1;
+ } else {
+ // Scale step for point sampling duplicates all pixels equally.
+ *dx = FixedDiv(Abs(src_width), dst_width);
+ *dy = FixedDiv(src_height, dst_height);
+ *x = CENTERSTART(*dx, 0);
+ *y = CENTERSTART(*dy, 0);
+ }
+ // Negative src_width means horizontally mirror.
+ if (src_width < 0) {
+ *x += (dst_width - 1) * *dx;
+ *dx = -*dx;
+ // src_width = -src_width; // Caller must do this.
+ }
+}
+#undef CENTERSTART
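+// Example for ScaleSlope: 100 -> 50 pixels with kFilterBilinear gives
+// dx = 0x20000 (2.0) and x = (dx >> 1) - 32768 = 0x8000 (0.5), so the first
+// sample sits halfway between source pixels 0 and 1.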
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/scale_gcc.cc b/media/libaom/src/third_party/libyuv/source/scale_gcc.cc
new file mode 100644
index 000000000..8a6ac5459
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale_gcc.cc
@@ -0,0 +1,1089 @@
+/*
+ * Copyright 2013 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC x86 and x64.
+#if !defined(LIBYUV_DISABLE_X86) && (defined(__x86_64__) || defined(__i386__))
+
+// Offsets for source bytes 0 to 9
+static uvec8 kShuf0 =
+ { 0, 1, 3, 4, 5, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+// Offsets for source bytes 11 to 20 with 8 subtracted = 3 to 12.
+static uvec8 kShuf1 =
+ { 3, 4, 5, 7, 8, 9, 11, 12, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+// Offsets for source bytes 21 to 31 with 16 subtracted = 5 to 15.
+static uvec8 kShuf2 =
+ { 5, 7, 8, 9, 11, 12, 13, 15, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+// Offsets for source bytes 0 to 10
+static uvec8 kShuf01 =
+ { 0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10 };
+
+// Offsets for source bytes 10 to 21 with 8 subtracted = 2 to 13.
+static uvec8 kShuf11 =
+ { 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13 };
+
+// Offsets for source bytes 21 to 31 with 16 subtracted = 5 to 15.
+static uvec8 kShuf21 =
+ { 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15 };
+
+// Coefficients for source bytes 0 to 10
+static uvec8 kMadd01 =
+ { 3, 1, 2, 2, 1, 3, 3, 1, 2, 2, 1, 3, 3, 1, 2, 2 };
+
+// Coefficients for source bytes 10 to 21
+static uvec8 kMadd11 =
+ { 1, 3, 3, 1, 2, 2, 1, 3, 3, 1, 2, 2, 1, 3, 3, 1 };
+
+// Coefficients for source bytes 21 to 31
+static uvec8 kMadd21 =
+ { 2, 2, 1, 3, 3, 1, 2, 2, 1, 3, 3, 1, 2, 2, 1, 3 };
+
+// Rounding constant for the 3/4 box filters.
+static vec16 kRound34 =
+ { 2, 2, 2, 2, 2, 2, 2, 2 };
+
+static uvec8 kShuf38a =
+ { 0, 3, 6, 8, 11, 14, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+static uvec8 kShuf38b =
+ { 128, 128, 128, 128, 128, 128, 0, 3, 6, 8, 11, 14, 128, 128, 128, 128 };
+
+// Arrange words 0,3,6 into 0,1,2
+static uvec8 kShufAc =
+ { 0, 1, 6, 7, 12, 13, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+// Arrange words 0,3,6 into 3,4,5
+static uvec8 kShufAc3 =
+ { 128, 128, 128, 128, 128, 128, 0, 1, 6, 7, 12, 13, 128, 128, 128, 128 };
+
+// Scaling values for boxes of 3x3 and 2x3
+static uvec16 kScaleAc33 =
+ { 65536 / 9, 65536 / 9, 65536 / 6, 65536 / 9, 65536 / 9, 65536 / 6, 0, 0 };
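+// pmulhuw keeps the high 16 bits of a 16x16 multiply, so multiplying a box
+// sum by 65536 / 9 approximates sum / 9 without an integer divide; the
+// 65536 / 6 lanes average the partial 2-wide boxes at the right edge.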
+
+// Arrange first value for pixels 0,1,2,3,4,5
+static uvec8 kShufAb0 =
+ { 0, 128, 3, 128, 6, 128, 8, 128, 11, 128, 14, 128, 128, 128, 128, 128 };
+
+// Arrange second value for pixels 0,1,2,3,4,5
+static uvec8 kShufAb1 =
+ { 1, 128, 4, 128, 7, 128, 9, 128, 12, 128, 15, 128, 128, 128, 128, 128 };
+
+// Arrange third value for pixels 0,1,2,3,4,5
+static uvec8 kShufAb2 =
+ { 2, 128, 5, 128, 128, 128, 10, 128, 13, 128, 128, 128, 128, 128, 128, 128 };
+
+// Scaling values for boxes of 3x2 and 2x2
+static uvec16 kScaleAb2 =
+ { 65536 / 3, 65536 / 3, 65536 / 2, 65536 / 3, 65536 / 3, 65536 / 2, 0, 0 };
+
+// GCC versions of row functions are verbatim conversions from Visual C.
+// Generated using gcc disassembly on Visual C object file:
+// objdump -D yuvscaler.obj >yuvscaler.txt
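+// ScaleRowDown2 keeps the odd source bytes: psrlw $8 moves each word's high
+// byte into its low byte and packuswb repacks 32 source bytes into 16.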
+
+void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ :: "memory", "cc", "xmm0", "xmm1"
+ );
+}
+
+void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrlw $0x8,%%xmm5 \n"
+
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10, 0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "movdqa %%xmm1,%%xmm3 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "pand %%xmm5,%%xmm2 \n"
+ "pand %%xmm5,%%xmm3 \n"
+ "pavgw %%xmm2,%%xmm0 \n"
+ "pavgw %%xmm3,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ :: "memory", "cc", "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrlw $0x8,%%xmm5 \n"
+
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,0,3,1,xmm2) // movdqu (%0,%3,1),%%xmm2
+ MEMOPREG(movdqu,0x10,0,3,1,xmm3) // movdqu 0x10(%0,%3,1),%%xmm3
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "pavgb %%xmm3,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "movdqa %%xmm1,%%xmm3 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "pand %%xmm5,%%xmm2 \n"
+ "pand %%xmm5,%%xmm3 \n"
+ "pavgw %%xmm2,%%xmm0 \n"
+ "pavgw %%xmm3,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x10,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ : "r"((intptr_t)(src_stride)) // %3
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm5"
+ );
+}
+
+void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "pcmpeqb %%xmm5,%%xmm5 \n"
+ "psrld $0x18,%%xmm5 \n"
+ "pslld $0x10,%%xmm5 \n"
+
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pand %%xmm5,%%xmm0 \n"
+ "pand %%xmm5,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ :: "memory", "cc", "xmm0", "xmm1", "xmm5"
+ );
+}
+
+void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ intptr_t stridex3 = 0;
+ asm volatile (
+ "pcmpeqb %%xmm7,%%xmm7 \n"
+ "psrlw $0x8,%%xmm7 \n"
+ "lea " MEMLEA4(0x00,4,4,2) ",%3 \n"
+
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,0,4,1,xmm2) // movdqu (%0,%4,1),%%xmm2
+ MEMOPREG(movdqu,0x10,0,4,1,xmm3) // movdqu 0x10(%0,%4,1),%%xmm3
+ "pavgb %%xmm2,%%xmm0 \n"
+ "pavgb %%xmm3,%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,0,4,2,xmm2) // movdqu (%0,%4,2),%%xmm2
+ MEMOPREG(movdqu,0x10,0,4,2,xmm3) // movdqu 0x10(%0,%4,2),%%xmm3
+ MEMOPREG(movdqu,0x00,0,3,1,xmm4) // movdqu (%0,%3,1),%%xmm4
+ MEMOPREG(movdqu,0x10,0,3,1,xmm5) // movdqu 0x10(%0,%3,1),%%xmm5
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pavgb %%xmm4,%%xmm2 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "pavgb %%xmm5,%%xmm3 \n"
+ "pavgb %%xmm3,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "movdqa %%xmm1,%%xmm3 \n"
+ "psrlw $0x8,%%xmm1 \n"
+ "pand %%xmm7,%%xmm2 \n"
+ "pand %%xmm7,%%xmm3 \n"
+ "pavgw %%xmm2,%%xmm0 \n"
+ "pavgw %%xmm3,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "psrlw $0x8,%%xmm0 \n"
+ "pand %%xmm7,%%xmm2 \n"
+ "pavgw %%xmm2,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x8,1) ",%1 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(stridex3) // %3
+ : "r"((intptr_t)(src_stride)) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm7"
+ );
+}
+
+void ScaleRowDown34_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "movdqa %0,%%xmm3 \n"
+ "movdqa %1,%%xmm4 \n"
+ "movdqa %2,%%xmm5 \n"
+ :
+ : "m"(kShuf0), // %0
+ "m"(kShuf1), // %1
+ "m"(kShuf2) // %2
+ );
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm2 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "movdqa %%xmm2,%%xmm1 \n"
+ "palignr $0x8,%%xmm0,%%xmm1 \n"
+ "pshufb %%xmm3,%%xmm0 \n"
+ "pshufb %%xmm4,%%xmm1 \n"
+ "pshufb %%xmm5,%%xmm2 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ "movq %%xmm1," MEMACCESS2(0x8,1) " \n"
+ "movq %%xmm2," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x18,1) ",%1 \n"
+ "sub $0x18,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ :: "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5"
+ );
+}
+
+void ScaleRowDown34_1_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "movdqa %0,%%xmm2 \n" // kShuf01
+ "movdqa %1,%%xmm3 \n" // kShuf11
+ "movdqa %2,%%xmm4 \n" // kShuf21
+ :
+ : "m"(kShuf01), // %0
+ "m"(kShuf11), // %1
+ "m"(kShuf21) // %2
+ );
+ asm volatile (
+ "movdqa %0,%%xmm5 \n" // kMadd01
+ "movdqa %1,%%xmm0 \n" // kMadd11
+ "movdqa %2,%%xmm1 \n" // kRound34
+ :
+ : "m"(kMadd01), // %0
+ "m"(kMadd11), // %1
+ "m"(kRound34) // %2
+ );
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x00,0,3,1,xmm7) // movdqu (%0,%3),%%xmm7
+ "pavgb %%xmm7,%%xmm6 \n"
+ "pshufb %%xmm2,%%xmm6 \n"
+ "pmaddubsw %%xmm5,%%xmm6 \n"
+ "paddsw %%xmm1,%%xmm6 \n"
+ "psrlw $0x2,%%xmm6 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "movq %%xmm6," MEMACCESS(1) " \n"
+ "movdqu " MEMACCESS2(0x8,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x8,0,3,1,xmm7) // movdqu 0x8(%0,%3),%%xmm7
+ "pavgb %%xmm7,%%xmm6 \n"
+ "pshufb %%xmm3,%%xmm6 \n"
+ "pmaddubsw %%xmm0,%%xmm6 \n"
+ "paddsw %%xmm1,%%xmm6 \n"
+ "psrlw $0x2,%%xmm6 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "movq %%xmm6," MEMACCESS2(0x8,1) " \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x10,0,3,1,xmm7) // movdqu 0x10(%0,%3),%%xmm7
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pavgb %%xmm7,%%xmm6 \n"
+ "pshufb %%xmm4,%%xmm6 \n"
+ "pmaddubsw %4,%%xmm6 \n"
+ "paddsw %%xmm1,%%xmm6 \n"
+ "psrlw $0x2,%%xmm6 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "movq %%xmm6," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x18,1) ",%1 \n"
+ "sub $0x18,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ : "r"((intptr_t)(src_stride)), // %3
+ "m"(kMadd21) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+
+void ScaleRowDown34_0_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "movdqa %0,%%xmm2 \n" // kShuf01
+ "movdqa %1,%%xmm3 \n" // kShuf11
+ "movdqa %2,%%xmm4 \n" // kShuf21
+ :
+ : "m"(kShuf01), // %0
+ "m"(kShuf11), // %1
+ "m"(kShuf21) // %2
+ );
+ asm volatile (
+ "movdqa %0,%%xmm5 \n" // kMadd01
+ "movdqa %1,%%xmm0 \n" // kMadd11
+ "movdqa %2,%%xmm1 \n" // kRound34
+ :
+ : "m"(kMadd01), // %0
+ "m"(kMadd11), // %1
+ "m"(kRound34) // %2
+ );
+
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x00,0,3,1,xmm7) // movdqu (%0,%3,1),%%xmm7
+ "pavgb %%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm6 \n"
+ "pshufb %%xmm2,%%xmm6 \n"
+ "pmaddubsw %%xmm5,%%xmm6 \n"
+ "paddsw %%xmm1,%%xmm6 \n"
+ "psrlw $0x2,%%xmm6 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "movq %%xmm6," MEMACCESS(1) " \n"
+ "movdqu " MEMACCESS2(0x8,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x8,0,3,1,xmm7) // movdqu 0x8(%0,%3,1),%%xmm7
+ "pavgb %%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm6 \n"
+ "pshufb %%xmm3,%%xmm6 \n"
+ "pmaddubsw %%xmm0,%%xmm6 \n"
+ "paddsw %%xmm1,%%xmm6 \n"
+ "psrlw $0x2,%%xmm6 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "movq %%xmm6," MEMACCESS2(0x8,1) " \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm6 \n"
+ MEMOPREG(movdqu,0x10,0,3,1,xmm7) // movdqu 0x10(%0,%3,1),%%xmm7
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pavgb %%xmm6,%%xmm7 \n"
+ "pavgb %%xmm7,%%xmm6 \n"
+ "pshufb %%xmm4,%%xmm6 \n"
+ "pmaddubsw %4,%%xmm6 \n"
+ "paddsw %%xmm1,%%xmm6 \n"
+ "psrlw $0x2,%%xmm6 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "movq %%xmm6," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x18,1) ",%1 \n"
+ "sub $0x18,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ : "r"((intptr_t)(src_stride)), // %3
+ "m"(kMadd21) // %4
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+
+void ScaleRowDown38_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "movdqa %3,%%xmm4 \n"
+ "movdqa %4,%%xmm5 \n"
+
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pshufb %%xmm4,%%xmm0 \n"
+ "pshufb %%xmm5,%%xmm1 \n"
+ "paddusb %%xmm1,%%xmm0 \n"
+ "movq %%xmm0," MEMACCESS(1) " \n"
+ "movhlps %%xmm0,%%xmm1 \n"
+ "movd %%xmm1," MEMACCESS2(0x8,1) " \n"
+ "lea " MEMLEA(0xc,1) ",%1 \n"
+ "sub $0xc,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ : "m"(kShuf38a), // %3
+ "m"(kShuf38b) // %4
+ : "memory", "cc", "xmm0", "xmm1", "xmm4", "xmm5"
+ );
+}
+
+void ScaleRowDown38_2_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "movdqa %0,%%xmm2 \n"
+ "movdqa %1,%%xmm3 \n"
+ "movdqa %2,%%xmm4 \n"
+ "movdqa %3,%%xmm5 \n"
+ :
+ : "m"(kShufAb0), // %0
+ "m"(kShufAb1), // %1
+ "m"(kShufAb2), // %2
+ "m"(kScaleAb2) // %3
+ );
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,3,1,xmm1) // movdqu (%0,%3,1),%%xmm1
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "pavgb %%xmm1,%%xmm0 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "pshufb %%xmm2,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm6 \n"
+ "pshufb %%xmm3,%%xmm6 \n"
+ "paddusw %%xmm6,%%xmm1 \n"
+ "pshufb %%xmm4,%%xmm0 \n"
+ "paddusw %%xmm0,%%xmm1 \n"
+ "pmulhuw %%xmm5,%%xmm1 \n"
+ "packuswb %%xmm1,%%xmm1 \n"
+ "movd %%xmm1," MEMACCESS(1) " \n"
+ "psrlq $0x10,%%xmm1 \n"
+ "movd %%xmm1," MEMACCESS2(0x2,1) " \n"
+ "lea " MEMLEA(0x6,1) ",%1 \n"
+ "sub $0x6,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ : "r"((intptr_t)(src_stride)) // %3
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+
+void ScaleRowDown38_3_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "movdqa %0,%%xmm2 \n"
+ "movdqa %1,%%xmm3 \n"
+ "movdqa %2,%%xmm4 \n"
+ "pxor %%xmm5,%%xmm5 \n"
+ :
+ : "m"(kShufAc), // %0
+ "m"(kShufAc3), // %1
+ "m"(kScaleAc33) // %2
+ );
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movdqu,0x00,0,3,1,xmm6) // movdqu (%0,%3,1),%%xmm6
+ "movhlps %%xmm0,%%xmm1 \n"
+ "movhlps %%xmm6,%%xmm7 \n"
+ "punpcklbw %%xmm5,%%xmm0 \n"
+ "punpcklbw %%xmm5,%%xmm1 \n"
+ "punpcklbw %%xmm5,%%xmm6 \n"
+ "punpcklbw %%xmm5,%%xmm7 \n"
+ "paddusw %%xmm6,%%xmm0 \n"
+ "paddusw %%xmm7,%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,0,3,2,xmm6) // movdqu (%0,%3,2),%%xmm6
+ "lea " MEMLEA(0x10,0) ",%0 \n"
+ "movhlps %%xmm6,%%xmm7 \n"
+ "punpcklbw %%xmm5,%%xmm6 \n"
+ "punpcklbw %%xmm5,%%xmm7 \n"
+ "paddusw %%xmm6,%%xmm0 \n"
+ "paddusw %%xmm7,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm6 \n"
+ "psrldq $0x2,%%xmm0 \n"
+ "paddusw %%xmm0,%%xmm6 \n"
+ "psrldq $0x2,%%xmm0 \n"
+ "paddusw %%xmm0,%%xmm6 \n"
+ "pshufb %%xmm2,%%xmm6 \n"
+ "movdqa %%xmm1,%%xmm7 \n"
+ "psrldq $0x2,%%xmm1 \n"
+ "paddusw %%xmm1,%%xmm7 \n"
+ "psrldq $0x2,%%xmm1 \n"
+ "paddusw %%xmm1,%%xmm7 \n"
+ "pshufb %%xmm3,%%xmm7 \n"
+ "paddusw %%xmm7,%%xmm6 \n"
+ "pmulhuw %%xmm4,%%xmm6 \n"
+ "packuswb %%xmm6,%%xmm6 \n"
+ "movd %%xmm6," MEMACCESS(1) " \n"
+ "psrlq $0x10,%%xmm6 \n"
+ "movd %%xmm6," MEMACCESS2(0x2,1) " \n"
+ "lea " MEMLEA(0x6,1) ",%1 \n"
+ "sub $0x6,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ : "r"((intptr_t)(src_stride)) // %3
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7"
+ );
+}
+
+// Reads 16xN bytes and produces 16 shorts at a time.
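+// Each 16 byte column strip is zero extended to words with punpcklbw and
+// punpckhbw, then src_height rows are accumulated with saturating paddusw
+// before moving to the next strip.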
+void ScaleAddRows_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int src_width, int src_height) {
+ int tmp_height = 0;
+ intptr_t tmp_src = 0;
+ asm volatile (
+ "mov %0,%3 \n" // row pointer
+ "mov %5,%2 \n" // height
+ "pxor %%xmm0,%%xmm0 \n" // clear accumulators
+ "pxor %%xmm1,%%xmm1 \n"
+ "pxor %%xmm4,%%xmm4 \n"
+
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(3) ",%%xmm2 \n"
+ "add %6,%3 \n"
+ "movdqa %%xmm2,%%xmm3 \n"
+ "punpcklbw %%xmm4,%%xmm2 \n"
+ "punpckhbw %%xmm4,%%xmm3 \n"
+ "paddusw %%xmm2,%%xmm0 \n"
+ "paddusw %%xmm3,%%xmm1 \n"
+ "sub $0x1,%2 \n"
+ "jg 1b \n"
+
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,1) " \n"
+ "lea " MEMLEA(0x20,1) ",%1 \n"
+ "lea " MEMLEA(0x10,0) ",%0 \n" // src_ptr += 16
+ "mov %0,%3 \n" // row pointer
+ "mov %5,%2 \n" // height
+ "pxor %%xmm0,%%xmm0 \n" // clear accumulators
+ "pxor %%xmm1,%%xmm1 \n"
+ "sub $0x10,%4 \n"
+ "jg 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(tmp_height), // %2
+ "+r"(tmp_src), // %3
+ "+r"(src_width), // %4
+ "+rm"(src_height) // %5
+ : "rm"((intptr_t)(src_stride)) // %6
+ : "memory", "cc", "xmm0", "xmm1", "xmm2", "xmm3", "xmm4"
+ );
+}
+
+// Bilinear column filtering. SSSE3 version.
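+// x and dx are 16.16 fixed point: x >> 16 selects the source pixel pair and
+// bits 9..15 supply the 7 bit blend fraction fed to pmaddubsw.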
+void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ intptr_t x0 = 0, x1 = 0, temp_pixel = 0;
+ asm volatile (
+ "movd %6,%%xmm2 \n"
+ "movd %7,%%xmm3 \n"
+ "movl $0x04040000,%k2 \n"
+ "movd %k2,%%xmm5 \n"
+ "pcmpeqb %%xmm6,%%xmm6 \n"
+ "psrlw $0x9,%%xmm6 \n"
+ "pextrw $0x1,%%xmm2,%k3 \n"
+ "subl $0x2,%5 \n"
+ "jl 29f \n"
+ "movdqa %%xmm2,%%xmm0 \n"
+ "paddd %%xmm3,%%xmm0 \n"
+ "punpckldq %%xmm0,%%xmm2 \n"
+ "punpckldq %%xmm3,%%xmm3 \n"
+ "paddd %%xmm3,%%xmm3 \n"
+ "pextrw $0x3,%%xmm2,%k4 \n"
+
+ LABELALIGN
+ "2: \n"
+ "movdqa %%xmm2,%%xmm1 \n"
+ "paddd %%xmm3,%%xmm2 \n"
+ MEMOPARG(movzwl,0x00,1,3,1,k2) // movzwl (%1,%3,1),%k2
+ "movd %k2,%%xmm0 \n"
+ "psrlw $0x9,%%xmm1 \n"
+ MEMOPARG(movzwl,0x00,1,4,1,k2) // movzwl (%1,%4,1),%k2
+ "movd %k2,%%xmm4 \n"
+ "pshufb %%xmm5,%%xmm1 \n"
+ "punpcklwd %%xmm4,%%xmm0 \n"
+ "pxor %%xmm6,%%xmm1 \n"
+ "pmaddubsw %%xmm1,%%xmm0 \n"
+ "pextrw $0x1,%%xmm2,%k3 \n"
+ "pextrw $0x3,%%xmm2,%k4 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movd %%xmm0,%k2 \n"
+ "mov %w2," MEMACCESS(0) " \n"
+ "lea " MEMLEA(0x2,0) ",%0 \n"
+ "sub $0x2,%5 \n"
+ "jge 2b \n"
+
+ LABELALIGN
+ "29: \n"
+ "addl $0x1,%5 \n"
+ "jl 99f \n"
+ MEMOPARG(movzwl,0x00,1,3,1,k2) // movzwl (%1,%3,1),%k2
+ "movd %k2,%%xmm0 \n"
+ "psrlw $0x9,%%xmm2 \n"
+ "pshufb %%xmm5,%%xmm2 \n"
+ "pxor %%xmm6,%%xmm2 \n"
+ "pmaddubsw %%xmm2,%%xmm0 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movd %%xmm0,%k2 \n"
+ "mov %b2," MEMACCESS(0) " \n"
+ "99: \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+a"(temp_pixel), // %2
+ "+r"(x0), // %3
+ "+r"(x1), // %4
+ "+rm"(dst_width) // %5
+ : "rm"(x), // %6
+ "rm"(dx) // %7
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+
+// Reads 16 pixels, duplicates them and writes 32 pixels.
+// Loads and stores are unaligned (movdqu), so no alignment is required.
+void ScaleColsUp2_SSE2(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpcklbw %%xmm0,%%xmm0 \n"
+ "punpckhbw %%xmm1,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(0) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,0) " \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "sub $0x20,%2 \n"
+ "jg 1b \n"
+
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(dst_width) // %2
+ :: "memory", "cc", "xmm0", "xmm1"
+ );
+}
+
+void ScaleARGBRowDown2_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(dst_width) // %2
+ :: "memory", "cc", "xmm0", "xmm1"
+ );
+}
+
+void ScaleARGBRowDown2Linear_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm2 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(dst_width) // %2
+ :: "memory", "cc", "xmm0", "xmm1"
+ );
+}
+
+void ScaleARGBRowDown2Box_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(0) ",%%xmm0 \n"
+ "movdqu " MEMACCESS2(0x10,0) ",%%xmm1 \n"
+ MEMOPREG(movdqu,0x00,0,3,1,xmm2) // movdqu (%0,%3,1),%%xmm2
+ MEMOPREG(movdqu,0x10,0,3,1,xmm3) // movdqu 0x10(%0,%3,1),%%xmm3
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "pavgb %%xmm3,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm2 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(1) " \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "sub $0x4,%2 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(dst_width) // %2
+ : "r"((intptr_t)(src_stride)) // %3
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3"
+ );
+}
+
+// Reads 4 pixels at a time.
+// The store uses movdqu, so dst_argb needs no particular alignment.
+void ScaleARGBRowDownEven_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx, uint8* dst_argb, int dst_width) {
+ intptr_t src_stepx_x4 = (intptr_t)(src_stepx);
+ intptr_t src_stepx_x12 = 0;
+ asm volatile (
+ "lea " MEMLEA3(0x00,1,4) ",%1 \n"
+ "lea " MEMLEA4(0x00,1,1,2) ",%4 \n"
+ LABELALIGN
+ "1: \n"
+ "movd " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movd,0x00,0,1,1,xmm1) // movd (%0,%1,1),%%xmm1
+ "punpckldq %%xmm1,%%xmm0 \n"
+ MEMOPREG(movd,0x00,0,1,2,xmm2) // movd (%0,%1,2),%%xmm2
+ MEMOPREG(movd,0x00,0,4,1,xmm3) // movd (%0,%4,1),%%xmm3
+ "lea " MEMLEA4(0x00,0,1,4) ",%0 \n"
+ "punpckldq %%xmm3,%%xmm2 \n"
+ "punpcklqdq %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_stepx_x4), // %1
+ "+r"(dst_argb), // %2
+ "+r"(dst_width), // %3
+ "+r"(src_stepx_x12) // %4
+ :: "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3"
+ );
+}
+
+// Blends four 2x2 to 4x1.
+// The store uses movdqu, so dst_argb needs no particular alignment.
+void ScaleARGBRowDownEvenBox_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride, int src_stepx,
+ uint8* dst_argb, int dst_width) {
+ intptr_t src_stepx_x4 = (intptr_t)(src_stepx);
+ intptr_t src_stepx_x12 = 0;
+ intptr_t row1 = (intptr_t)(src_stride);
+ asm volatile (
+ "lea " MEMLEA3(0x00,1,4) ",%1 \n"
+ "lea " MEMLEA4(0x00,1,1,2) ",%4 \n"
+ "lea " MEMLEA4(0x00,0,5,1) ",%5 \n"
+
+ LABELALIGN
+ "1: \n"
+ "movq " MEMACCESS(0) ",%%xmm0 \n"
+ MEMOPREG(movhps,0x00,0,1,1,xmm0) // movhps (%0,%1,1),%%xmm0
+ MEMOPREG(movq,0x00,0,1,2,xmm1) // movq (%0,%1,2),%%xmm1
+ MEMOPREG(movhps,0x00,0,4,1,xmm1) // movhps (%0,%4,1),%%xmm1
+ "lea " MEMLEA4(0x00,0,1,4) ",%0 \n"
+ "movq " MEMACCESS(5) ",%%xmm2 \n"
+ MEMOPREG(movhps,0x00,5,1,1,xmm2) // movhps (%5,%1,1),%%xmm2
+ MEMOPREG(movq,0x00,5,1,2,xmm3) // movq (%5,%1,2),%%xmm3
+ MEMOPREG(movhps,0x00,5,4,1,xmm3) // movhps (%5,%4,1),%%xmm3
+ "lea " MEMLEA4(0x00,5,1,4) ",%5 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "pavgb %%xmm3,%%xmm1 \n"
+ "movdqa %%xmm0,%%xmm2 \n"
+ "shufps $0x88,%%xmm1,%%xmm0 \n"
+ "shufps $0xdd,%%xmm1,%%xmm2 \n"
+ "pavgb %%xmm2,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%3 \n"
+ "jg 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_stepx_x4), // %1
+ "+r"(dst_argb), // %2
+ "+rm"(dst_width), // %3
+ "+r"(src_stepx_x12), // %4
+ "+r"(row1) // %5
+ :: "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3"
+ );
+}
+
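+// The prologue below builds the vector [x, x+dx, x+2*dx, x+3*dx] with pshufd
+// and paddd, then pextrw pulls the integer pixel indices out two at a time.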
+void ScaleARGBCols_SSE2(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ intptr_t x0 = 0, x1 = 0;
+ asm volatile (
+ "movd %5,%%xmm2 \n"
+ "movd %6,%%xmm3 \n"
+ "pshufd $0x0,%%xmm2,%%xmm2 \n"
+ "pshufd $0x11,%%xmm3,%%xmm0 \n"
+ "paddd %%xmm0,%%xmm2 \n"
+ "paddd %%xmm3,%%xmm3 \n"
+ "pshufd $0x5,%%xmm3,%%xmm0 \n"
+ "paddd %%xmm0,%%xmm2 \n"
+ "paddd %%xmm3,%%xmm3 \n"
+ "pshufd $0x0,%%xmm3,%%xmm3 \n"
+ "pextrw $0x1,%%xmm2,%k0 \n"
+ "pextrw $0x3,%%xmm2,%k1 \n"
+ "cmp $0x0,%4 \n"
+ "jl 99f \n"
+ "sub $0x4,%4 \n"
+ "jl 49f \n"
+
+ LABELALIGN
+ "40: \n"
+ MEMOPREG(movd,0x00,3,0,4,xmm0) // movd (%3,%0,4),%%xmm0
+ MEMOPREG(movd,0x00,3,1,4,xmm1) // movd (%3,%1,4),%%xmm1
+ "pextrw $0x5,%%xmm2,%k0 \n"
+ "pextrw $0x7,%%xmm2,%k1 \n"
+ "paddd %%xmm3,%%xmm2 \n"
+ "punpckldq %%xmm1,%%xmm0 \n"
+ MEMOPREG(movd,0x00,3,0,4,xmm1) // movd (%3,%0,4),%%xmm1
+ MEMOPREG(movd,0x00,3,1,4,xmm4) // movd (%3,%1,4),%%xmm4
+ "pextrw $0x1,%%xmm2,%k0 \n"
+ "pextrw $0x3,%%xmm2,%k1 \n"
+ "punpckldq %%xmm4,%%xmm1 \n"
+ "punpcklqdq %%xmm1,%%xmm0 \n"
+ "movdqu %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x10,2) ",%2 \n"
+ "sub $0x4,%4 \n"
+ "jge 40b \n"
+
+ "49: \n"
+ "test $0x2,%4 \n"
+ "je 29f \n"
+ MEMOPREG(movd,0x00,3,0,4,xmm0) // movd (%3,%0,4),%%xmm0
+ MEMOPREG(movd,0x00,3,1,4,xmm1) // movd (%3,%1,4),%%xmm1
+ "pextrw $0x5,%%xmm2,%k0 \n"
+ "punpckldq %%xmm1,%%xmm0 \n"
+ "movq %%xmm0," MEMACCESS(2) " \n"
+ "lea " MEMLEA(0x8,2) ",%2 \n"
+ "29: \n"
+ "test $0x1,%4 \n"
+ "je 99f \n"
+ MEMOPREG(movd,0x00,3,0,4,xmm0) // movd (%3,%0,4),%%xmm0
+ "movd %%xmm0," MEMACCESS(2) " \n"
+ "99: \n"
+ : "+a"(x0), // %0
+ "+d"(x1), // %1
+ "+r"(dst_argb), // %2
+ "+r"(src_argb), // %3
+ "+r"(dst_width) // %4
+ : "rm"(x), // %5
+ "rm"(dx) // %6
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4"
+ );
+}
+
+// Reads 4 pixels, duplicates them and writes 8 pixels.
+// Loads and stores are unaligned (movdqu), so no alignment is required.
+void ScaleARGBColsUp2_SSE2(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ asm volatile (
+ LABELALIGN
+ "1: \n"
+ "movdqu " MEMACCESS(1) ",%%xmm0 \n"
+ "lea " MEMLEA(0x10,1) ",%1 \n"
+ "movdqa %%xmm0,%%xmm1 \n"
+ "punpckldq %%xmm0,%%xmm0 \n"
+ "punpckhdq %%xmm1,%%xmm1 \n"
+ "movdqu %%xmm0," MEMACCESS(0) " \n"
+ "movdqu %%xmm1," MEMACCESS2(0x10,0) " \n"
+ "lea " MEMLEA(0x20,0) ",%0 \n"
+ "sub $0x8,%2 \n"
+ "jg 1b \n"
+
+ : "+r"(dst_argb), // %0
+ "+r"(src_argb), // %1
+ "+r"(dst_width) // %2
+ :: "memory", "cc", NACL_R14
+ "xmm0", "xmm1"
+ );
+}
+
+// Shuffle table for arranging 2 pixels into pairs for pmaddubsw
+static uvec8 kShuffleColARGB = {
+ 0u, 4u, 1u, 5u, 2u, 6u, 3u, 7u, // bbggrraa 1st pixel
+ 8u, 12u, 9u, 13u, 10u, 14u, 11u, 15u // bbggrraa 2nd pixel
+};
+
+// Shuffle table for duplicating 2 fractions into 8 bytes each
+static uvec8 kShuffleFractions = {
+ 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 4u, 4u, 4u, 4u, 4u, 4u, 4u, 4u,
+};
+
+// Bilinear row filtering combines 4x2 -> 4x1. SSSE3 version
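+// The fraction f and its complement 0x7f ^ f alternate across byte pairs, so
+// one pmaddubsw computes a * (0x7f ^ f) + b * f for all four channels of both
+// output pixels at once.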
+void ScaleARGBFilterCols_SSSE3(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ intptr_t x0 = 0, x1 = 0;
+ asm volatile (
+ "movdqa %0,%%xmm4 \n"
+ "movdqa %1,%%xmm5 \n"
+ :
+ : "m"(kShuffleColARGB), // %0
+ "m"(kShuffleFractions) // %1
+ );
+
+ asm volatile (
+ "movd %5,%%xmm2 \n"
+ "movd %6,%%xmm3 \n"
+ "pcmpeqb %%xmm6,%%xmm6 \n"
+ "psrlw $0x9,%%xmm6 \n"
+ "pextrw $0x1,%%xmm2,%k3 \n"
+ "sub $0x2,%2 \n"
+ "jl 29f \n"
+ "movdqa %%xmm2,%%xmm0 \n"
+ "paddd %%xmm3,%%xmm0 \n"
+ "punpckldq %%xmm0,%%xmm2 \n"
+ "punpckldq %%xmm3,%%xmm3 \n"
+ "paddd %%xmm3,%%xmm3 \n"
+ "pextrw $0x3,%%xmm2,%k4 \n"
+
+ LABELALIGN
+ "2: \n"
+ "movdqa %%xmm2,%%xmm1 \n"
+ "paddd %%xmm3,%%xmm2 \n"
+ MEMOPREG(movq,0x00,1,3,4,xmm0) // movq (%1,%3,4),%%xmm0
+ "psrlw $0x9,%%xmm1 \n"
+ MEMOPREG(movhps,0x00,1,4,4,xmm0) // movhps (%1,%4,4),%%xmm0
+ "pshufb %%xmm5,%%xmm1 \n"
+ "pshufb %%xmm4,%%xmm0 \n"
+ "pxor %%xmm6,%%xmm1 \n"
+ "pmaddubsw %%xmm1,%%xmm0 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "pextrw $0x1,%%xmm2,%k3 \n"
+ "pextrw $0x3,%%xmm2,%k4 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movq %%xmm0," MEMACCESS(0) " \n"
+ "lea " MEMLEA(0x8,0) ",%0 \n"
+ "sub $0x2,%2 \n"
+ "jge 2b \n"
+
+ LABELALIGN
+ "29: \n"
+ "add $0x1,%2 \n"
+ "jl 99f \n"
+ "psrlw $0x9,%%xmm2 \n"
+ MEMOPREG(movq,0x00,1,3,4,xmm0) // movq (%1,%3,4),%%xmm0
+ "pshufb %%xmm5,%%xmm2 \n"
+ "pshufb %%xmm4,%%xmm0 \n"
+ "pxor %%xmm6,%%xmm2 \n"
+ "pmaddubsw %%xmm2,%%xmm0 \n"
+ "psrlw $0x7,%%xmm0 \n"
+ "packuswb %%xmm0,%%xmm0 \n"
+ "movd %%xmm0," MEMACCESS(0) " \n"
+
+ LABELALIGN
+ "99: \n"
+ : "+r"(dst_argb), // %0
+ "+r"(src_argb), // %1
+ "+rm"(dst_width), // %2
+ "+r"(x0), // %3
+ "+r"(x1) // %4
+ : "rm"(x), // %5
+ "rm"(dx) // %6
+ : "memory", "cc", NACL_R14
+ "xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6"
+ );
+}
+
+// Divide num by div and return as 16.16 fixed point result.
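+// cdq sign extends num into edx, then shld/shl build the 64 bit dividend
+// (num << 16) in edx:eax so a single idiv yields the 16.16 quotient.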
+int FixedDiv_X86(int num, int div) {
+ asm volatile (
+ "cdq \n"
+ "shld $0x10,%%eax,%%edx \n"
+ "shl $0x10,%%eax \n"
+ "idiv %1 \n"
+ "mov %0, %%eax \n"
+ : "+a"(num) // %0
+ : "c"(div) // %1
+ : "memory", "cc", "edx"
+ );
+ return num;
+}
+
+// Divide num - 1 by div - 1 and return as 16.16 fixed point result.
+int FixedDiv1_X86(int num, int div) {
+ asm volatile (
+ "cdq \n"
+ "shld $0x10,%%eax,%%edx \n"
+ "shl $0x10,%%eax \n"
+ "sub $0x10001,%%eax \n"
+ "sbb $0x0,%%edx \n"
+ "sub $0x1,%1 \n"
+ "idiv %1 \n"
+ "mov %0, %%eax \n"
+ : "+a"(num) // %0
+ : "c"(div) // %1
+ : "memory", "cc", "edx"
+ );
+ return num;
+}
+
+#endif // defined(__x86_64__) || defined(__i386__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/scale_mips.cc b/media/libaom/src/third_party/libyuv/source/scale_mips.cc
new file mode 100644
index 000000000..3eb4f27c4
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale_mips.cc
@@ -0,0 +1,654 @@
+/*
+ * Copyright 2012 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/basic_types.h"
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC MIPS DSPR2
+#if !defined(LIBYUV_DISABLE_MIPS) && \
+ defined(__mips_dsp) && (__mips_dsp_rev >= 2) && \
+ (_MIPS_SIM == _MIPS_SIM_ABI32)
+
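+// precr.qb.ph packs the even byte of every halfword of two source words into
+// one word, so the point sampling loops below emit four pixels per pack.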
+void ScaleRowDown2_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ __asm__ __volatile__(
+ ".set push \n"
+ ".set noreorder \n"
+
+ "srl $t9, %[dst_width], 4 \n" // iterations -> by 16
+ "beqz $t9, 2f \n"
+ " nop \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0|
+ "lw $t1, 4(%[src_ptr]) \n" // |7|6|5|4|
+ "lw $t2, 8(%[src_ptr]) \n" // |11|10|9|8|
+ "lw $t3, 12(%[src_ptr]) \n" // |15|14|13|12|
+ "lw $t4, 16(%[src_ptr]) \n" // |19|18|17|16|
+ "lw $t5, 20(%[src_ptr]) \n" // |23|22|21|20|
+ "lw $t6, 24(%[src_ptr]) \n" // |27|26|25|24|
+ "lw $t7, 28(%[src_ptr]) \n" // |31|30|29|28|
+ // TODO(fbarchard): Use odd pixels instead of even.
+ "precr.qb.ph $t8, $t1, $t0 \n" // |6|4|2|0|
+ "precr.qb.ph $t0, $t3, $t2 \n" // |14|12|10|8|
+ "precr.qb.ph $t1, $t5, $t4 \n" // |22|20|18|16|
+ "precr.qb.ph $t2, $t7, $t6 \n" // |30|28|26|24|
+ "addiu %[src_ptr], %[src_ptr], 32 \n"
+ "addiu $t9, $t9, -1 \n"
+ "sw $t8, 0(%[dst]) \n"
+ "sw $t0, 4(%[dst]) \n"
+ "sw $t1, 8(%[dst]) \n"
+ "sw $t2, 12(%[dst]) \n"
+ "bgtz $t9, 1b \n"
+ " addiu %[dst], %[dst], 16 \n"
+
+ "2: \n"
+ "andi $t9, %[dst_width], 0xf \n" // residue
+ "beqz $t9, 3f \n"
+ " nop \n"
+
+ "21: \n"
+ "lbu $t0, 0(%[src_ptr]) \n"
+ "addiu %[src_ptr], %[src_ptr], 2 \n"
+ "addiu $t9, $t9, -1 \n"
+ "sb $t0, 0(%[dst]) \n"
+ "bgtz $t9, 21b \n"
+ " addiu %[dst], %[dst], 1 \n"
+
+ "3: \n"
+ ".set pop \n"
+ : [src_ptr] "+r" (src_ptr),
+ [dst] "+r" (dst)
+ : [dst_width] "r" (dst_width)
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9"
+ );
+}
+
+void ScaleRowDown2Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ const uint8* t = src_ptr + src_stride;
+
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ "srl $t9, %[dst_width], 3 \n" // iterations -> step 8
+ "bltz $t9, 2f \n"
+ " nop \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0|
+ "lw $t1, 4(%[src_ptr]) \n" // |7|6|5|4|
+ "lw $t2, 8(%[src_ptr]) \n" // |11|10|9|8|
+ "lw $t3, 12(%[src_ptr]) \n" // |15|14|13|12|
+ "lw $t4, 0(%[t]) \n" // |19|18|17|16|
+ "lw $t5, 4(%[t]) \n" // |23|22|21|20|
+ "lw $t6, 8(%[t]) \n" // |27|26|25|24|
+ "lw $t7, 12(%[t]) \n" // |31|30|29|28|
+ "addiu $t9, $t9, -1 \n"
+ "srl $t8, $t0, 16 \n" // |X|X|3|2|
+ "ins $t0, $t4, 16, 16 \n" // |17|16|1|0|
+ "ins $t4, $t8, 0, 16 \n" // |19|18|3|2|
+ "raddu.w.qb $t0, $t0 \n" // |17+16+1+0|
+ "raddu.w.qb $t4, $t4 \n" // |19+18+3+2|
+ "shra_r.w $t0, $t0, 2 \n" // |t0+2|>>2
+ "shra_r.w $t4, $t4, 2 \n" // |t4+2|>>2
+ "srl $t8, $t1, 16 \n" // |X|X|7|6|
+ "ins $t1, $t5, 16, 16 \n" // |21|20|5|4|
+ "ins $t5, $t8, 0, 16 \n" // |22|23|7|6|
+ "raddu.w.qb $t1, $t1 \n" // |21+20+5+4|
+ "raddu.w.qb $t5, $t5 \n" // |23+22+7+6|
+ "shra_r.w $t1, $t1, 2 \n" // |t1+2|>>2
+ "shra_r.w $t5, $t5, 2 \n" // |t5+2|>>2
+ "srl $t8, $t2, 16 \n" // |X|X|11|10|
+ "ins $t2, $t6, 16, 16 \n" // |25|24|9|8|
+ "ins $t6, $t8, 0, 16 \n" // |27|26|11|10|
+ "raddu.w.qb $t2, $t2 \n" // |25+24+9+8|
+ "raddu.w.qb $t6, $t6 \n" // |27+26+11+10|
+ "shra_r.w $t2, $t2, 2 \n" // |t2+2|>>2
+ "shra_r.w $t6, $t6, 2 \n" // |t5+2|>>2
+ "srl $t8, $t3, 16 \n" // |X|X|15|14|
+ "ins $t3, $t7, 16, 16 \n" // |29|28|13|12|
+ "ins $t7, $t8, 0, 16 \n" // |31|30|15|14|
+ "raddu.w.qb $t3, $t3 \n" // |29+28+13+12|
+ "raddu.w.qb $t7, $t7 \n" // |31+30+15+14|
+ "shra_r.w $t3, $t3, 2 \n" // |t3+2|>>2
+ "shra_r.w $t7, $t7, 2 \n" // |t7+2|>>2
+ "addiu %[src_ptr], %[src_ptr], 16 \n"
+ "addiu %[t], %[t], 16 \n"
+ "sb $t0, 0(%[dst]) \n"
+ "sb $t4, 1(%[dst]) \n"
+ "sb $t1, 2(%[dst]) \n"
+ "sb $t5, 3(%[dst]) \n"
+ "sb $t2, 4(%[dst]) \n"
+ "sb $t6, 5(%[dst]) \n"
+ "sb $t3, 6(%[dst]) \n"
+ "sb $t7, 7(%[dst]) \n"
+ "bgtz $t9, 1b \n"
+ " addiu %[dst], %[dst], 8 \n"
+
+ "2: \n"
+ "andi $t9, %[dst_width], 0x7 \n" // x = residue
+ "beqz $t9, 3f \n"
+ " nop \n"
+
+ "21: \n"
+ "lwr $t1, 0(%[src_ptr]) \n"
+ "lwl $t1, 3(%[src_ptr]) \n"
+ "lwr $t2, 0(%[t]) \n"
+ "lwl $t2, 3(%[t]) \n"
+ "srl $t8, $t1, 16 \n"
+ "ins $t1, $t2, 16, 16 \n"
+ "ins $t2, $t8, 0, 16 \n"
+ "raddu.w.qb $t1, $t1 \n"
+ "raddu.w.qb $t2, $t2 \n"
+ "shra_r.w $t1, $t1, 2 \n"
+ "shra_r.w $t2, $t2, 2 \n"
+ "sb $t1, 0(%[dst]) \n"
+ "sb $t2, 1(%[dst]) \n"
+ "addiu %[src_ptr], %[src_ptr], 4 \n"
+ "addiu $t9, $t9, -2 \n"
+ "addiu %[t], %[t], 4 \n"
+ "bgtz $t9, 21b \n"
+ " addiu %[dst], %[dst], 2 \n"
+
+ "3: \n"
+ ".set pop \n"
+
+ : [src_ptr] "+r" (src_ptr),
+ [dst] "+r" (dst), [t] "+r" (t)
+ : [dst_width] "r" (dst_width)
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9"
+ );
+}
+
+void ScaleRowDown4_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ "srl $t9, %[dst_width], 3 \n"
+ "beqz $t9, 2f \n"
+ " nop \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t1, 0(%[src_ptr]) \n" // |3|2|1|0|
+ "lw $t2, 4(%[src_ptr]) \n" // |7|6|5|4|
+ "lw $t3, 8(%[src_ptr]) \n" // |11|10|9|8|
+ "lw $t4, 12(%[src_ptr]) \n" // |15|14|13|12|
+ "lw $t5, 16(%[src_ptr]) \n" // |19|18|17|16|
+ "lw $t6, 20(%[src_ptr]) \n" // |23|22|21|20|
+ "lw $t7, 24(%[src_ptr]) \n" // |27|26|25|24|
+ "lw $t8, 28(%[src_ptr]) \n" // |31|30|29|28|
+ "precr.qb.ph $t1, $t2, $t1 \n" // |6|4|2|0|
+ "precr.qb.ph $t2, $t4, $t3 \n" // |14|12|10|8|
+ "precr.qb.ph $t5, $t6, $t5 \n" // |22|20|18|16|
+ "precr.qb.ph $t6, $t8, $t7 \n" // |30|28|26|24|
+ "precr.qb.ph $t1, $t2, $t1 \n" // |12|8|4|0|
+ "precr.qb.ph $t5, $t6, $t5 \n" // |28|24|20|16|
+ "addiu %[src_ptr], %[src_ptr], 32 \n"
+ "addiu $t9, $t9, -1 \n"
+ "sw $t1, 0(%[dst]) \n"
+ "sw $t5, 4(%[dst]) \n"
+ "bgtz $t9, 1b \n"
+ " addiu %[dst], %[dst], 8 \n"
+
+ "2: \n"
+ "andi $t9, %[dst_width], 7 \n" // residue
+ "beqz $t9, 3f \n"
+ " nop \n"
+
+ "21: \n"
+ "lbu $t1, 0(%[src_ptr]) \n"
+ "addiu %[src_ptr], %[src_ptr], 4 \n"
+ "addiu $t9, $t9, -1 \n"
+ "sb $t1, 0(%[dst]) \n"
+ "bgtz $t9, 21b \n"
+ " addiu %[dst], %[dst], 1 \n"
+
+ "3: \n"
+ ".set pop \n"
+ : [src_ptr] "+r" (src_ptr),
+ [dst] "+r" (dst)
+ : [dst_width] "r" (dst_width)
+ : "t1", "t2", "t3", "t4", "t5",
+ "t6", "t7", "t8", "t9"
+ );
+}
+
+void ScaleRowDown4Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ intptr_t stride = src_stride;
+ const uint8* s1 = src_ptr + stride;
+ const uint8* s2 = s1 + stride;
+ const uint8* s3 = s2 + stride;
+
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ "srl $t9, %[dst_width], 1 \n"
+ "andi $t8, %[dst_width], 1 \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0|
+ "lw $t1, 0(%[s1]) \n" // |7|6|5|4|
+ "lw $t2, 0(%[s2]) \n" // |11|10|9|8|
+ "lw $t3, 0(%[s3]) \n" // |15|14|13|12|
+ "lw $t4, 4(%[src_ptr]) \n" // |19|18|17|16|
+ "lw $t5, 4(%[s1]) \n" // |23|22|21|20|
+ "lw $t6, 4(%[s2]) \n" // |27|26|25|24|
+ "lw $t7, 4(%[s3]) \n" // |31|30|29|28|
+ "raddu.w.qb $t0, $t0 \n" // |3 + 2 + 1 + 0|
+ "raddu.w.qb $t1, $t1 \n" // |7 + 6 + 5 + 4|
+ "raddu.w.qb $t2, $t2 \n" // |11 + 10 + 9 + 8|
+ "raddu.w.qb $t3, $t3 \n" // |15 + 14 + 13 + 12|
+ "raddu.w.qb $t4, $t4 \n" // |19 + 18 + 17 + 16|
+ "raddu.w.qb $t5, $t5 \n" // |23 + 22 + 21 + 20|
+ "raddu.w.qb $t6, $t6 \n" // |27 + 26 + 25 + 24|
+ "raddu.w.qb $t7, $t7 \n" // |31 + 30 + 29 + 28|
+ "add $t0, $t0, $t1 \n"
+ "add $t1, $t2, $t3 \n"
+ "add $t0, $t0, $t1 \n"
+ "add $t4, $t4, $t5 \n"
+ "add $t6, $t6, $t7 \n"
+ "add $t4, $t4, $t6 \n"
+ "shra_r.w $t0, $t0, 4 \n"
+ "shra_r.w $t4, $t4, 4 \n"
+ "sb $t0, 0(%[dst]) \n"
+ "sb $t4, 1(%[dst]) \n"
+ "addiu %[src_ptr], %[src_ptr], 8 \n"
+ "addiu %[s1], %[s1], 8 \n"
+ "addiu %[s2], %[s2], 8 \n"
+ "addiu %[s3], %[s3], 8 \n"
+ "addiu $t9, $t9, -1 \n"
+ "bgtz $t9, 1b \n"
+ " addiu %[dst], %[dst], 2 \n"
+ "beqz $t8, 2f \n"
+ " nop \n"
+
+ "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0|
+ "lw $t1, 0(%[s1]) \n" // |7|6|5|4|
+ "lw $t2, 0(%[s2]) \n" // |11|10|9|8|
+ "lw $t3, 0(%[s3]) \n" // |15|14|13|12|
+ "raddu.w.qb $t0, $t0 \n" // |3 + 2 + 1 + 0|
+ "raddu.w.qb $t1, $t1 \n" // |7 + 6 + 5 + 4|
+ "raddu.w.qb $t2, $t2 \n" // |11 + 10 + 9 + 8|
+ "raddu.w.qb $t3, $t3 \n" // |15 + 14 + 13 + 12|
+ "add $t0, $t0, $t1 \n"
+ "add $t1, $t2, $t3 \n"
+ "add $t0, $t0, $t1 \n"
+ "shra_r.w $t0, $t0, 4 \n"
+ "sb $t0, 0(%[dst]) \n"
+
+ "2: \n"
+ ".set pop \n"
+
+ : [src_ptr] "+r" (src_ptr),
+ [dst] "+r" (dst),
+ [s1] "+r" (s1),
+ [s2] "+r" (s2),
+ [s3] "+r" (s3)
+ : [dst_width] "r" (dst_width)
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6","t7", "t8", "t9"
+ );
+}
+
+void ScaleRowDown34_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t1, 0(%[src_ptr]) \n" // |3|2|1|0|
+ "lw $t2, 4(%[src_ptr]) \n" // |7|6|5|4|
+ "lw $t3, 8(%[src_ptr]) \n" // |11|10|9|8|
+ "lw $t4, 12(%[src_ptr]) \n" // |15|14|13|12|
+ "lw $t5, 16(%[src_ptr]) \n" // |19|18|17|16|
+ "lw $t6, 20(%[src_ptr]) \n" // |23|22|21|20|
+ "lw $t7, 24(%[src_ptr]) \n" // |27|26|25|24|
+ "lw $t8, 28(%[src_ptr]) \n" // |31|30|29|28|
+ "precrq.qb.ph $t0, $t2, $t4 \n" // |7|5|15|13|
+ "precrq.qb.ph $t9, $t6, $t8 \n" // |23|21|31|30|
+ "addiu %[dst_width], %[dst_width], -24 \n"
+ "ins $t1, $t1, 8, 16 \n" // |3|1|0|X|
+ "ins $t4, $t0, 8, 16 \n" // |X|15|13|12|
+ "ins $t5, $t5, 8, 16 \n" // |19|17|16|X|
+ "ins $t8, $t9, 8, 16 \n" // |X|31|29|28|
+ "addiu %[src_ptr], %[src_ptr], 32 \n"
+ "packrl.ph $t0, $t3, $t0 \n" // |9|8|7|5|
+ "packrl.ph $t9, $t7, $t9 \n" // |25|24|23|21|
+ "prepend $t1, $t2, 8 \n" // |4|3|1|0|
+ "prepend $t3, $t4, 24 \n" // |15|13|12|11|
+ "prepend $t5, $t6, 8 \n" // |20|19|17|16|
+ "prepend $t7, $t8, 24 \n" // |31|29|28|27|
+ "sw $t1, 0(%[dst]) \n"
+ "sw $t0, 4(%[dst]) \n"
+ "sw $t3, 8(%[dst]) \n"
+ "sw $t5, 12(%[dst]) \n"
+ "sw $t9, 16(%[dst]) \n"
+ "sw $t7, 20(%[dst]) \n"
+ "bnez %[dst_width], 1b \n"
+ " addiu %[dst], %[dst], 24 \n"
+ ".set pop \n"
+ : [src_ptr] "+r" (src_ptr),
+ [dst] "+r" (dst),
+ [dst_width] "+r" (dst_width)
+ :
+ : "t0", "t1", "t2", "t3", "t4", "t5",
+ "t6","t7", "t8", "t9"
+ );
+}
+
+void ScaleRowDown34_0_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ "repl.ph $t3, 3 \n" // 0x00030003
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, 0(%[src_ptr]) \n" // |S3|S2|S1|S0|
+ "lwx $t1, %[src_stride](%[src_ptr]) \n" // |T3|T2|T1|T0|
+ "rotr $t2, $t0, 8 \n" // |S0|S3|S2|S1|
+ "rotr $t6, $t1, 8 \n" // |T0|T3|T2|T1|
+ "muleu_s.ph.qbl $t4, $t2, $t3 \n" // |S0*3|S3*3|
+ "muleu_s.ph.qbl $t5, $t6, $t3 \n" // |T0*3|T3*3|
+ "andi $t0, $t2, 0xFFFF \n" // |0|0|S2|S1|
+ "andi $t1, $t6, 0xFFFF \n" // |0|0|T2|T1|
+ "raddu.w.qb $t0, $t0 \n"
+ "raddu.w.qb $t1, $t1 \n"
+ "shra_r.w $t0, $t0, 1 \n"
+ "shra_r.w $t1, $t1, 1 \n"
+ "preceu.ph.qbr $t2, $t2 \n" // |0|S2|0|S1|
+ "preceu.ph.qbr $t6, $t6 \n" // |0|T2|0|T1|
+ "rotr $t2, $t2, 16 \n" // |0|S1|0|S2|
+ "rotr $t6, $t6, 16 \n" // |0|T1|0|T2|
+ "addu.ph $t2, $t2, $t4 \n"
+ "addu.ph $t6, $t6, $t5 \n"
+ "sll $t5, $t0, 1 \n"
+ "add $t0, $t5, $t0 \n"
+ "shra_r.ph $t2, $t2, 2 \n"
+ "shra_r.ph $t6, $t6, 2 \n"
+ "shll.ph $t4, $t2, 1 \n"
+ "addq.ph $t4, $t4, $t2 \n"
+ "addu $t0, $t0, $t1 \n"
+ "addiu %[src_ptr], %[src_ptr], 4 \n"
+ "shra_r.w $t0, $t0, 2 \n"
+ "addu.ph $t6, $t6, $t4 \n"
+ "shra_r.ph $t6, $t6, 2 \n"
+ "srl $t1, $t6, 16 \n"
+ "addiu %[dst_width], %[dst_width], -3 \n"
+ "sb $t1, 0(%[d]) \n"
+ "sb $t0, 1(%[d]) \n"
+ "sb $t6, 2(%[d]) \n"
+ "bgtz %[dst_width], 1b \n"
+ " addiu %[d], %[d], 3 \n"
+ "3: \n"
+ ".set pop \n"
+ : [src_ptr] "+r" (src_ptr),
+ [src_stride] "+r" (src_stride),
+ [d] "+r" (d),
+ [dst_width] "+r" (dst_width)
+ :
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6"
+ );
+}
+
+void ScaleRowDown34_1_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* d, int dst_width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+ "repl.ph $t2, 3 \n" // 0x00030003
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, 0(%[src_ptr]) \n" // |S3|S2|S1|S0|
+ "lwx $t1, %[src_stride](%[src_ptr]) \n" // |T3|T2|T1|T0|
+ "rotr $t4, $t0, 8 \n" // |S0|S3|S2|S1|
+ "rotr $t6, $t1, 8 \n" // |T0|T3|T2|T1|
+ "muleu_s.ph.qbl $t3, $t4, $t2 \n" // |S0*3|S3*3|
+ "muleu_s.ph.qbl $t5, $t6, $t2 \n" // |T0*3|T3*3|
+ "andi $t0, $t4, 0xFFFF \n" // |0|0|S2|S1|
+ "andi $t1, $t6, 0xFFFF \n" // |0|0|T2|T1|
+ "raddu.w.qb $t0, $t0 \n"
+ "raddu.w.qb $t1, $t1 \n"
+ "shra_r.w $t0, $t0, 1 \n"
+ "shra_r.w $t1, $t1, 1 \n"
+ "preceu.ph.qbr $t4, $t4 \n" // |0|S2|0|S1|
+ "preceu.ph.qbr $t6, $t6 \n" // |0|T2|0|T1|
+ "rotr $t4, $t4, 16 \n" // |0|S1|0|S2|
+ "rotr $t6, $t6, 16 \n" // |0|T1|0|T2|
+ "addu.ph $t4, $t4, $t3 \n"
+ "addu.ph $t6, $t6, $t5 \n"
+ "shra_r.ph $t6, $t6, 2 \n"
+ "shra_r.ph $t4, $t4, 2 \n"
+ "addu.ph $t6, $t6, $t4 \n"
+ "addiu %[src_ptr], %[src_ptr], 4 \n"
+ "shra_r.ph $t6, $t6, 1 \n"
+ "addu $t0, $t0, $t1 \n"
+ "addiu %[dst_width], %[dst_width], -3 \n"
+ "shra_r.w $t0, $t0, 1 \n"
+ "srl $t1, $t6, 16 \n"
+ "sb $t1, 0(%[d]) \n"
+ "sb $t0, 1(%[d]) \n"
+ "sb $t6, 2(%[d]) \n"
+ "bgtz %[dst_width], 1b \n"
+ " addiu %[d], %[d], 3 \n"
+ "3: \n"
+ ".set pop \n"
+ : [src_ptr] "+r" (src_ptr),
+ [src_stride] "+r" (src_stride),
+ [d] "+r" (d),
+ [dst_width] "+r" (dst_width)
+ :
+ : "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6"
+ );
+}
+
+void ScaleRowDown38_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, 0(%[src_ptr]) \n" // |3|2|1|0|
+ "lw $t1, 4(%[src_ptr]) \n" // |7|6|5|4|
+ "lw $t2, 8(%[src_ptr]) \n" // |11|10|9|8|
+ "lw $t3, 12(%[src_ptr]) \n" // |15|14|13|12|
+ "lw $t4, 16(%[src_ptr]) \n" // |19|18|17|16|
+ "lw $t5, 20(%[src_ptr]) \n" // |23|22|21|20|
+ "lw $t6, 24(%[src_ptr]) \n" // |27|26|25|24|
+ "lw $t7, 28(%[src_ptr]) \n" // |31|30|29|28|
+ "wsbh $t0, $t0 \n" // |2|3|0|1|
+ "wsbh $t6, $t6 \n" // |26|27|24|25|
+ "srl $t0, $t0, 8 \n" // |X|2|3|0|
+ "srl $t3, $t3, 16 \n" // |X|X|15|14|
+ "srl $t5, $t5, 16 \n" // |X|X|23|22|
+ "srl $t7, $t7, 16 \n" // |X|X|31|30|
+ "ins $t1, $t2, 24, 8 \n" // |8|6|5|4|
+ "ins $t6, $t5, 0, 8 \n" // |26|27|24|22|
+ "ins $t1, $t0, 0, 16 \n" // |8|6|3|0|
+ "ins $t6, $t7, 24, 8 \n" // |30|27|24|22|
+ "prepend $t2, $t3, 24 \n" // |X|15|14|11|
+ "ins $t4, $t4, 16, 8 \n" // |19|16|17|X|
+ "ins $t4, $t2, 0, 16 \n" // |19|16|14|11|
+ "addiu %[src_ptr], %[src_ptr], 32 \n"
+ "addiu %[dst_width], %[dst_width], -12 \n"
+    "addiu     $t8, %[dst_width], -12    \n"
+ "sw $t1, 0(%[dst]) \n"
+ "sw $t4, 4(%[dst]) \n"
+ "sw $t6, 8(%[dst]) \n"
+ "bgez $t8, 1b \n"
+ " addiu %[dst], %[dst], 12 \n"
+ ".set pop \n"
+ : [src_ptr] "+r" (src_ptr),
+ [dst] "+r" (dst),
+ [dst_width] "+r" (dst_width)
+ :
+ : "t0", "t1", "t2", "t3", "t4",
+ "t5", "t6", "t7", "t8"
+ );
+}
+
+void ScaleRowDown38_2_Box_MIPS_DSPR2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ intptr_t stride = src_stride;
+ const uint8* t = src_ptr + stride;
+ const int c = 0x2AAA;
+
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, 0(%[src_ptr]) \n" // |S3|S2|S1|S0|
+ "lw $t1, 4(%[src_ptr]) \n" // |S7|S6|S5|S4|
+ "lw $t2, 0(%[t]) \n" // |T3|T2|T1|T0|
+ "lw $t3, 4(%[t]) \n" // |T7|T6|T5|T4|
+ "rotr $t1, $t1, 16 \n" // |S5|S4|S7|S6|
+ "packrl.ph $t4, $t1, $t3 \n" // |S7|S6|T7|T6|
+ "packrl.ph $t5, $t3, $t1 \n" // |T5|T4|S5|S4|
+ "raddu.w.qb $t4, $t4 \n" // S7+S6+T7+T6
+ "raddu.w.qb $t5, $t5 \n" // T5+T4+S5+S4
+ "precrq.qb.ph $t6, $t0, $t2 \n" // |S3|S1|T3|T1|
+ "precrq.qb.ph $t6, $t6, $t6 \n" // |S3|T3|S3|T3|
+ "srl $t4, $t4, 2 \n" // t4 / 4
+ "srl $t6, $t6, 16 \n" // |0|0|S3|T3|
+ "raddu.w.qb $t6, $t6 \n" // 0+0+S3+T3
+ "addu $t6, $t5, $t6 \n"
+ "mul $t6, $t6, %[c] \n" // t6 * 0x2AAA
+ "sll $t0, $t0, 8 \n" // |S2|S1|S0|0|
+ "sll $t2, $t2, 8 \n" // |T2|T1|T0|0|
+ "raddu.w.qb $t0, $t0 \n" // S2+S1+S0+0
+ "raddu.w.qb $t2, $t2 \n" // T2+T1+T0+0
+ "addu $t0, $t0, $t2 \n"
+ "mul $t0, $t0, %[c] \n" // t0 * 0x2AAA
+ "addiu %[src_ptr], %[src_ptr], 8 \n"
+ "addiu %[t], %[t], 8 \n"
+ "addiu %[dst_width], %[dst_width], -3 \n"
+ "addiu %[dst_ptr], %[dst_ptr], 3 \n"
+ "srl $t6, $t6, 16 \n"
+ "srl $t0, $t0, 16 \n"
+ "sb $t4, -1(%[dst_ptr]) \n"
+ "sb $t6, -2(%[dst_ptr]) \n"
+ "bgtz %[dst_width], 1b \n"
+ " sb $t0, -3(%[dst_ptr]) \n"
+ ".set pop \n"
+ : [src_ptr] "+r" (src_ptr),
+ [dst_ptr] "+r" (dst_ptr),
+ [t] "+r" (t),
+ [dst_width] "+r" (dst_width)
+ : [c] "r" (c)
+ : "t0", "t1", "t2", "t3", "t4", "t5", "t6"
+ );
+}
+
+void ScaleRowDown38_3_Box_MIPS_DSPR2(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ intptr_t stride = src_stride;
+ const uint8* s1 = src_ptr + stride;
+ stride += stride;
+ const uint8* s2 = src_ptr + stride;
+ const int c1 = 0x1C71;
+ const int c2 = 0x2AAA;
+
+ __asm__ __volatile__ (
+ ".set push \n"
+ ".set noreorder \n"
+
+ ".p2align 2 \n"
+ "1: \n"
+ "lw $t0, 0(%[src_ptr]) \n" // |S3|S2|S1|S0|
+ "lw $t1, 4(%[src_ptr]) \n" // |S7|S6|S5|S4|
+ "lw $t2, 0(%[s1]) \n" // |T3|T2|T1|T0|
+ "lw $t3, 4(%[s1]) \n" // |T7|T6|T5|T4|
+ "lw $t4, 0(%[s2]) \n" // |R3|R2|R1|R0|
+ "lw $t5, 4(%[s2]) \n" // |R7|R6|R5|R4|
+ "rotr $t1, $t1, 16 \n" // |S5|S4|S7|S6|
+ "packrl.ph $t6, $t1, $t3 \n" // |S7|S6|T7|T6|
+ "raddu.w.qb $t6, $t6 \n" // S7+S6+T7+T6
+ "packrl.ph $t7, $t3, $t1 \n" // |T5|T4|S5|S4|
+ "raddu.w.qb $t7, $t7 \n" // T5+T4+S5+S4
+ "sll $t8, $t5, 16 \n" // |R5|R4|0|0|
+ "raddu.w.qb $t8, $t8 \n" // R5+R4
+ "addu $t7, $t7, $t8 \n"
+ "srl $t8, $t5, 16 \n" // |0|0|R7|R6|
+ "raddu.w.qb $t8, $t8 \n" // R7 + R6
+ "addu $t6, $t6, $t8 \n"
+ "mul $t6, $t6, %[c2] \n" // t6 * 0x2AAA
+ "precrq.qb.ph $t8, $t0, $t2 \n" // |S3|S1|T3|T1|
+ "precrq.qb.ph $t8, $t8, $t4 \n" // |S3|T3|R3|R1|
+ "srl $t8, $t8, 8 \n" // |0|S3|T3|R3|
+ "raddu.w.qb $t8, $t8 \n" // S3 + T3 + R3
+ "addu $t7, $t7, $t8 \n"
+ "mul $t7, $t7, %[c1] \n" // t7 * 0x1C71
+ "sll $t0, $t0, 8 \n" // |S2|S1|S0|0|
+ "sll $t2, $t2, 8 \n" // |T2|T1|T0|0|
+ "sll $t4, $t4, 8 \n" // |R2|R1|R0|0|
+ "raddu.w.qb $t0, $t0 \n"
+ "raddu.w.qb $t2, $t2 \n"
+ "raddu.w.qb $t4, $t4 \n"
+ "addu $t0, $t0, $t2 \n"
+ "addu $t0, $t0, $t4 \n"
+ "mul $t0, $t0, %[c1] \n" // t0 * 0x1C71
+ "addiu %[src_ptr], %[src_ptr], 8 \n"
+ "addiu %[s1], %[s1], 8 \n"
+ "addiu %[s2], %[s2], 8 \n"
+ "addiu %[dst_width], %[dst_width], -3 \n"
+ "addiu %[dst_ptr], %[dst_ptr], 3 \n"
+ "srl $t6, $t6, 16 \n"
+ "srl $t7, $t7, 16 \n"
+ "srl $t0, $t0, 16 \n"
+ "sb $t6, -1(%[dst_ptr]) \n"
+ "sb $t7, -2(%[dst_ptr]) \n"
+ "bgtz %[dst_width], 1b \n"
+ " sb $t0, -3(%[dst_ptr]) \n"
+ ".set pop \n"
+ : [src_ptr] "+r" (src_ptr),
+ [dst_ptr] "+r" (dst_ptr),
+ [s1] "+r" (s1),
+ [s2] "+r" (s2),
+ [dst_width] "+r" (dst_width)
+ : [c1] "r" (c1), [c2] "r" (c2)
+ : "t0", "t1", "t2", "t3", "t4",
+ "t5", "t6", "t7", "t8"
+ );
+}
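+
+// Note on the constants used above: 0x2AAA is 65536 / 6 truncated and
+// 0x1C71 is 65536 / 9 truncated, so "mul" followed by a 16-bit right
+// shift divides the accumulated sums of 6 or 9 pixels by their count
+// without an integer divide. Worked example: six pixels of value 50 sum
+// to 300, and (300 * 0x2AAA) >> 16 = 49, within one of the exact
+// 300 / 6 = 50 (the truncated constant can undershoot by one).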
+
+#endif // defined(__mips_dsp) && (__mips_dsp_rev >= 2)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
diff --git a/media/libaom/src/third_party/libyuv/source/scale_neon.cc b/media/libaom/src/third_party/libyuv/source/scale_neon.cc
new file mode 100644
index 000000000..7825878e9
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale_neon.cc
@@ -0,0 +1,1037 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC Neon.
+#if !defined(LIBYUV_DISABLE_NEON) && defined(__ARM_NEON__) && \
+ !defined(__aarch64__)
+
+// NEON downscalers with interpolation.
+// Provided by Fritz Koenig
+
+// Read 32x1 pixels, throw away the even pixels, and write 16x1.
+void ScaleRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ // load even pixels into q0, odd into q1
+ MEMACCESS(0)
+ "vld2.8 {q0, q1}, [%0]! \n"
+ "subs %2, %2, #16 \n" // 16 processed per loop
+ MEMACCESS(1)
+ "vst1.8 {q1}, [%1]! \n" // store odd pixels
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst), // %1
+ "+r"(dst_width) // %2
+ :
+ : "q0", "q1" // Clobber List
+ );
+}
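+
+// Illustrative plain-C sketch of the kernel above (hypothetical helper,
+// not part of libyuv's build): 2x point sampling keeps every second
+// pixel, and like the asm it keeps the odd ones.
+static void ScaleRowDown2_C_Sketch(const uint8* src_ptr, ptrdiff_t src_stride,
+                                   uint8* dst, int dst_width) {
+  int x;
+  (void)src_stride;  // unused: point sampling reads a single row
+  for (x = 0; x < dst_width; ++x) {
+    dst[x] = src_ptr[2 * x + 1];  // store odd pixels, as vst1.8 {q1} does
+  }
+}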
+
+// Read 32x1 average down and write 16x1.
+void ScaleRowDown2Linear_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0, q1}, [%0]! \n" // load pixels and post inc
+ "subs %2, %2, #16 \n" // 16 processed per loop
+ "vpaddl.u8 q0, q0 \n" // add adjacent
+ "vpaddl.u8 q1, q1 \n"
+ "vrshrn.u16 d0, q0, #1 \n" // downshift, round and pack
+ "vrshrn.u16 d1, q1, #1 \n"
+ MEMACCESS(1)
+ "vst1.8 {q0}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst), // %1
+ "+r"(dst_width) // %2
+ :
+ : "q0", "q1" // Clobber List
+ );
+}
+
+// Read 32x2 average down and write 16x1.
+void ScaleRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ // change the stride to row 2 pointer
+ "add %1, %0 \n"
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0, q1}, [%0]! \n" // load row 1 and post inc
+ MEMACCESS(1)
+ "vld1.8 {q2, q3}, [%1]! \n" // load row 2 and post inc
+ "subs %3, %3, #16 \n" // 16 processed per loop
+ "vpaddl.u8 q0, q0 \n" // row 1 add adjacent
+ "vpaddl.u8 q1, q1 \n"
+ "vpadal.u8 q0, q2 \n" // row 2 add adjacent + row1
+ "vpadal.u8 q1, q3 \n"
+ "vrshrn.u16 d0, q0, #2 \n" // downshift, round and pack
+ "vrshrn.u16 d1, q1, #2 \n"
+ MEMACCESS(2)
+ "vst1.8 {q0}, [%2]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(src_stride), // %1
+ "+r"(dst), // %2
+ "+r"(dst_width) // %3
+ :
+ : "q0", "q1", "q2", "q3" // Clobber List
+ );
+}
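+
+// Sketch of the 2x2 box filter above (hypothetical helper for
+// illustration): each output is the rounded average of a 2x2 block.
+static void ScaleRowDown2Box_C_Sketch(const uint8* src_ptr,
+                                      ptrdiff_t src_stride,
+                                      uint8* dst, int dst_width) {
+  const uint8* s = src_ptr;               // row 1
+  const uint8* t = src_ptr + src_stride;  // row 2
+  int x;
+  for (x = 0; x < dst_width; ++x) {
+    // +2 gives round-to-nearest, matching vrshrn #2.
+    dst[x] =
+        (uint8)((s[2 * x] + s[2 * x + 1] + t[2 * x] + t[2 * x + 1] + 2) >> 2);
+  }
+}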
+
+void ScaleRowDown4_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0
+ "subs %2, %2, #8 \n" // 8 processed per loop
+ MEMACCESS(1)
+ "vst1.8 {d2}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ :
+ : "q0", "q1", "memory", "cc"
+ );
+}
+
+void ScaleRowDown4Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ const uint8* src_ptr1 = src_ptr + src_stride;
+ const uint8* src_ptr2 = src_ptr + src_stride * 2;
+ const uint8* src_ptr3 = src_ptr + src_stride * 3;
+asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0]! \n" // load up 16x4
+ MEMACCESS(3)
+ "vld1.8 {q1}, [%3]! \n"
+ MEMACCESS(4)
+ "vld1.8 {q2}, [%4]! \n"
+ MEMACCESS(5)
+ "vld1.8 {q3}, [%5]! \n"
+ "subs %2, %2, #4 \n"
+ "vpaddl.u8 q0, q0 \n"
+ "vpadal.u8 q0, q1 \n"
+ "vpadal.u8 q0, q2 \n"
+ "vpadal.u8 q0, q3 \n"
+ "vpaddl.u16 q0, q0 \n"
+ "vrshrn.u32 d0, q0, #4 \n" // divide by 16 w/rounding
+ "vmovn.u16 d0, q0 \n"
+ MEMACCESS(1)
+ "vst1.32 {d0[0]}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(src_ptr1), // %3
+ "+r"(src_ptr2), // %4
+ "+r"(src_ptr3) // %5
+ :
+ : "q0", "q1", "q2", "q3", "memory", "cc"
+ );
+}
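+
+// Sketch of the 4x4 box filter above (hypothetical helper for
+// illustration): sum each 4x4 source block and divide by 16 with
+// rounding, as the vpaddl/vpadal chain and vrshrn #4 do.
+static void ScaleRowDown4Box_C_Sketch(const uint8* src_ptr,
+                                      ptrdiff_t src_stride,
+                                      uint8* dst_ptr, int dst_width) {
+  int x, i, j;
+  for (x = 0; x < dst_width; ++x) {
+    int sum = 0;
+    for (j = 0; j < 4; ++j) {    // 4 rows
+      for (i = 0; i < 4; ++i) {  // 4 columns
+        sum += src_ptr[j * src_stride + 4 * x + i];
+      }
+    }
+    dst_ptr[x] = (uint8)((sum + 8) >> 4);  // divide by 16 w/rounding
+  }
+}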
+
+// Down scale from 4 to 3 pixels. Use the neon multilane read/write
+// to deinterleave groups of 4 pixels into 4 different registers.
+// Point samples 32 pixels to 24 pixels.
+void ScaleRowDown34_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0
+ "subs %2, %2, #24 \n"
+ "vmov d2, d3 \n" // order d0, d1, d2
+ MEMACCESS(1)
+ "vst3.8 {d0, d1, d2}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ :
+ : "d0", "d1", "d2", "d3", "memory", "cc"
+ );
+}
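+
+// Sketch of the 4:3 point sampling above (hypothetical helper for
+// illustration): of every 4 source pixels, 0, 1 and 3 are kept and
+// pixel 2 is dropped; the vld4/vmov/vst3 sequence does this 8 pixels
+// at a time.
+static void ScaleRowDown34_C_Sketch(const uint8* src_ptr,
+                                    ptrdiff_t src_stride,
+                                    uint8* dst_ptr, int dst_width) {
+  int x;
+  (void)src_stride;
+  for (x = 0; x < dst_width; x += 3) {
+    dst_ptr[x + 0] = src_ptr[0];
+    dst_ptr[x + 1] = src_ptr[1];
+    dst_ptr[x + 2] = src_ptr[3];  // pixel 2 is skipped
+    src_ptr += 4;
+  }
+}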
+
+void ScaleRowDown34_0_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "vmov.u8 d24, #3 \n"
+ "add %3, %0 \n"
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0
+ MEMACCESS(3)
+ "vld4.8 {d4, d5, d6, d7}, [%3]! \n" // src line 1
+ "subs %2, %2, #24 \n"
+
+ // filter src line 0 with src line 1
+ // expand chars to shorts to allow for room
+ // when adding lines together
+ "vmovl.u8 q8, d4 \n"
+ "vmovl.u8 q9, d5 \n"
+ "vmovl.u8 q10, d6 \n"
+ "vmovl.u8 q11, d7 \n"
+
+ // 3 * line_0 + line_1
+ "vmlal.u8 q8, d0, d24 \n"
+ "vmlal.u8 q9, d1, d24 \n"
+ "vmlal.u8 q10, d2, d24 \n"
+ "vmlal.u8 q11, d3, d24 \n"
+
+ // (3 * line_0 + line_1) >> 2
+ "vqrshrn.u16 d0, q8, #2 \n"
+ "vqrshrn.u16 d1, q9, #2 \n"
+ "vqrshrn.u16 d2, q10, #2 \n"
+ "vqrshrn.u16 d3, q11, #2 \n"
+
+ // a0 = (src[0] * 3 + s[1] * 1) >> 2
+ "vmovl.u8 q8, d1 \n"
+ "vmlal.u8 q8, d0, d24 \n"
+ "vqrshrn.u16 d0, q8, #2 \n"
+
+ // a1 = (src[1] * 1 + s[2] * 1) >> 1
+ "vrhadd.u8 d1, d1, d2 \n"
+
+ // a2 = (src[2] * 1 + s[3] * 3) >> 2
+ "vmovl.u8 q8, d2 \n"
+ "vmlal.u8 q8, d3, d24 \n"
+ "vqrshrn.u16 d2, q8, #2 \n"
+
+ MEMACCESS(1)
+ "vst3.8 {d0, d1, d2}, [%1]! \n"
+
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(src_stride) // %3
+ :
+ : "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11", "d24", "memory", "cc"
+ );
+}
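+
+// Sketch of the filter above (hypothetical helper for illustration):
+// the two rows are first blended 3:1 with rounding, then each group of
+// 4 blended pixels b0..b3 yields 3 outputs with weights (3,1)/4, (1,1)/2
+// and (1,3)/4, matching the a0/a1/a2 comments. The _1_Box variant below
+// is the same except the rows are blended 1:1.
+static void ScaleRowDown34_0_Box_C_Sketch(const uint8* src_ptr,
+                                          ptrdiff_t src_stride,
+                                          uint8* dst_ptr, int dst_width) {
+  const uint8* s = src_ptr;
+  const uint8* t = src_ptr + src_stride;
+  int x;
+  for (x = 0; x < dst_width; x += 3) {
+    uint8 b0 = (uint8)((3 * s[0] + t[0] + 2) >> 2);
+    uint8 b1 = (uint8)((3 * s[1] + t[1] + 2) >> 2);
+    uint8 b2 = (uint8)((3 * s[2] + t[2] + 2) >> 2);
+    uint8 b3 = (uint8)((3 * s[3] + t[3] + 2) >> 2);
+    dst_ptr[x + 0] = (uint8)((3 * b0 + b1 + 2) >> 2);
+    dst_ptr[x + 1] = (uint8)((b1 + b2 + 1) >> 1);
+    dst_ptr[x + 2] = (uint8)((b2 + 3 * b3 + 2) >> 2);
+    s += 4;
+    t += 4;
+  }
+}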
+
+void ScaleRowDown34_1_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "vmov.u8 d24, #3 \n"
+ "add %3, %0 \n"
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n" // src line 0
+ MEMACCESS(3)
+ "vld4.8 {d4, d5, d6, d7}, [%3]! \n" // src line 1
+ "subs %2, %2, #24 \n"
+ // average src line 0 with src line 1
+ "vrhadd.u8 q0, q0, q2 \n"
+ "vrhadd.u8 q1, q1, q3 \n"
+
+ // a0 = (src[0] * 3 + s[1] * 1) >> 2
+ "vmovl.u8 q3, d1 \n"
+ "vmlal.u8 q3, d0, d24 \n"
+ "vqrshrn.u16 d0, q3, #2 \n"
+
+ // a1 = (src[1] * 1 + s[2] * 1) >> 1
+ "vrhadd.u8 d1, d1, d2 \n"
+
+ // a2 = (src[2] * 1 + s[3] * 3) >> 2
+ "vmovl.u8 q3, d2 \n"
+ "vmlal.u8 q3, d3, d24 \n"
+ "vqrshrn.u16 d2, q3, #2 \n"
+
+ MEMACCESS(1)
+ "vst3.8 {d0, d1, d2}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(src_stride) // %3
+ :
+ : "r4", "q0", "q1", "q2", "q3", "d24", "memory", "cc"
+ );
+}
+
+#define HAS_SCALEROWDOWN38_NEON
+static uvec8 kShuf38 =
+ { 0, 3, 6, 8, 11, 14, 16, 19, 22, 24, 27, 30, 0, 0, 0, 0 };
+static uvec8 kShuf38_2 =
+ { 0, 8, 16, 2, 10, 17, 4, 12, 18, 6, 14, 19, 0, 0, 0, 0 };
+static vec16 kMult38_Div6 =
+ { 65536 / 12, 65536 / 12, 65536 / 12, 65536 / 12,
+ 65536 / 12, 65536 / 12, 65536 / 12, 65536 / 12 };
+static vec16 kMult38_Div9 =
+ { 65536 / 18, 65536 / 18, 65536 / 18, 65536 / 18,
+ 65536 / 18, 65536 / 18, 65536 / 18, 65536 / 18 };
+
+// 32 -> 12
+void ScaleRowDown38_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ MEMACCESS(3)
+ "vld1.8 {q3}, [%3] \n"
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {d0, d1, d2, d3}, [%0]! \n"
+ "subs %2, %2, #12 \n"
+ "vtbl.u8 d4, {d0, d1, d2, d3}, d6 \n"
+ "vtbl.u8 d5, {d0, d1, d2, d3}, d7 \n"
+ MEMACCESS(1)
+ "vst1.8 {d4}, [%1]! \n"
+ MEMACCESS(1)
+ "vst1.32 {d5[0]}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ : "r"(&kShuf38) // %3
+ : "d0", "d1", "d2", "d3", "d4", "d5", "memory", "cc"
+ );
+}
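+
+// Sketch of the 8:3 point sampling above (hypothetical helper for
+// illustration): kShuf38 picks bytes 0, 3 and 6 of every 8, which the
+// vtbl lookup applies 32 source pixels at a time.
+static void ScaleRowDown38_C_Sketch(const uint8* src_ptr,
+                                    ptrdiff_t src_stride,
+                                    uint8* dst_ptr, int dst_width) {
+  int x;
+  (void)src_stride;
+  for (x = 0; x < dst_width; x += 3) {
+    dst_ptr[x + 0] = src_ptr[0];
+    dst_ptr[x + 1] = src_ptr[3];
+    dst_ptr[x + 2] = src_ptr[6];
+    src_ptr += 8;
+  }
+}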
+
+// 32x3 -> 12x1
+void OMITFP ScaleRowDown38_3_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ const uint8* src_ptr1 = src_ptr + src_stride * 2;
+
+ asm volatile (
+ MEMACCESS(5)
+ "vld1.16 {q13}, [%5] \n"
+ MEMACCESS(6)
+ "vld1.8 {q14}, [%6] \n"
+ MEMACCESS(7)
+ "vld1.8 {q15}, [%7] \n"
+ "add %3, %0 \n"
+ ".p2align 2 \n"
+ "1: \n"
+
+ // d0 = 00 40 01 41 02 42 03 43
+ // d1 = 10 50 11 51 12 52 13 53
+ // d2 = 20 60 21 61 22 62 23 63
+ // d3 = 30 70 31 71 32 72 33 73
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n"
+ MEMACCESS(3)
+ "vld4.8 {d4, d5, d6, d7}, [%3]! \n"
+ MEMACCESS(4)
+ "vld4.8 {d16, d17, d18, d19}, [%4]! \n"
+ "subs %2, %2, #12 \n"
+
+    // Shuffle the input data around to align the data
+ // so adjacent data can be added. 0,1 - 2,3 - 4,5 - 6,7
+ // d0 = 00 10 01 11 02 12 03 13
+ // d1 = 40 50 41 51 42 52 43 53
+ "vtrn.u8 d0, d1 \n"
+ "vtrn.u8 d4, d5 \n"
+ "vtrn.u8 d16, d17 \n"
+
+ // d2 = 20 30 21 31 22 32 23 33
+ // d3 = 60 70 61 71 62 72 63 73
+ "vtrn.u8 d2, d3 \n"
+ "vtrn.u8 d6, d7 \n"
+ "vtrn.u8 d18, d19 \n"
+
+ // d0 = 00+10 01+11 02+12 03+13
+ // d2 = 40+50 41+51 42+52 43+53
+ "vpaddl.u8 q0, q0 \n"
+ "vpaddl.u8 q2, q2 \n"
+ "vpaddl.u8 q8, q8 \n"
+
+ // d3 = 60+70 61+71 62+72 63+73
+ "vpaddl.u8 d3, d3 \n"
+ "vpaddl.u8 d7, d7 \n"
+ "vpaddl.u8 d19, d19 \n"
+
+ // combine source lines
+ "vadd.u16 q0, q2 \n"
+ "vadd.u16 q0, q8 \n"
+ "vadd.u16 d4, d3, d7 \n"
+ "vadd.u16 d4, d19 \n"
+
+ // dst_ptr[3] = (s[6 + st * 0] + s[7 + st * 0]
+ // + s[6 + st * 1] + s[7 + st * 1]
+ // + s[6 + st * 2] + s[7 + st * 2]) / 6
+ "vqrdmulh.s16 q2, q2, q13 \n"
+ "vmovn.u16 d4, q2 \n"
+
+ // Shuffle 2,3 reg around so that 2 can be added to the
+ // 0,1 reg and 3 can be added to the 4,5 reg. This
+ // requires expanding from u8 to u16 as the 0,1 and 4,5
+ // registers are already expanded. Then do transposes
+ // to get aligned.
+ // q2 = xx 20 xx 30 xx 21 xx 31 xx 22 xx 32 xx 23 xx 33
+ "vmovl.u8 q1, d2 \n"
+ "vmovl.u8 q3, d6 \n"
+ "vmovl.u8 q9, d18 \n"
+
+ // combine source lines
+ "vadd.u16 q1, q3 \n"
+ "vadd.u16 q1, q9 \n"
+
+ // d4 = xx 20 xx 30 xx 22 xx 32
+ // d5 = xx 21 xx 31 xx 23 xx 33
+ "vtrn.u32 d2, d3 \n"
+
+ // d4 = xx 20 xx 21 xx 22 xx 23
+ // d5 = xx 30 xx 31 xx 32 xx 33
+ "vtrn.u16 d2, d3 \n"
+
+ // 0+1+2, 3+4+5
+ "vadd.u16 q0, q1 \n"
+
+    // Need to divide, but can't downshift as the value
+ // isn't a power of 2. So multiply by 65536 / n
+ // and take the upper 16 bits.
+ "vqrdmulh.s16 q0, q0, q15 \n"
+
+ // Align for table lookup, vtbl requires registers to
+ // be adjacent
+ "vmov.u8 d2, d4 \n"
+
+ "vtbl.u8 d3, {d0, d1, d2}, d28 \n"
+ "vtbl.u8 d4, {d0, d1, d2}, d29 \n"
+
+ MEMACCESS(1)
+ "vst1.8 {d3}, [%1]! \n"
+ MEMACCESS(1)
+ "vst1.32 {d4[0]}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(src_stride), // %3
+ "+r"(src_ptr1) // %4
+ : "r"(&kMult38_Div6), // %5
+ "r"(&kShuf38_2), // %6
+ "r"(&kMult38_Div9) // %7
+ : "q0", "q1", "q2", "q3", "q8", "q9", "q13", "q14", "q15", "memory", "cc"
+ );
+}
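+
+// Note on the constants: vqrdmulh computes saturate((2 * a * b + 0x8000)
+// >> 16), doubling the product. That is why kMult38_Div6 holds
+// 65536 / 12 rather than 65536 / 6: the effective multiplier is
+// 2 * (65536 / 12) / 65536, about 1 / 6. Worked example: six pixels of
+// value 50 sum to 300, and (2 * 300 * 5461 + 0x8000) >> 16 = 50.
+// Likewise kMult38_Div9 = 65536 / 18 gives an effective divide by 9.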
+
+// 32x2 -> 12x1
+void ScaleRowDown38_2_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ MEMACCESS(4)
+ "vld1.16 {q13}, [%4] \n"
+ MEMACCESS(5)
+ "vld1.8 {q14}, [%5] \n"
+ "add %3, %0 \n"
+ ".p2align 2 \n"
+ "1: \n"
+
+ // d0 = 00 40 01 41 02 42 03 43
+ // d1 = 10 50 11 51 12 52 13 53
+ // d2 = 20 60 21 61 22 62 23 63
+ // d3 = 30 70 31 71 32 72 33 73
+ MEMACCESS(0)
+ "vld4.8 {d0, d1, d2, d3}, [%0]! \n"
+ MEMACCESS(3)
+ "vld4.8 {d4, d5, d6, d7}, [%3]! \n"
+ "subs %2, %2, #12 \n"
+
+    // Shuffle the input data around to align the data
+ // so adjacent data can be added. 0,1 - 2,3 - 4,5 - 6,7
+ // d0 = 00 10 01 11 02 12 03 13
+ // d1 = 40 50 41 51 42 52 43 53
+ "vtrn.u8 d0, d1 \n"
+ "vtrn.u8 d4, d5 \n"
+
+ // d2 = 20 30 21 31 22 32 23 33
+ // d3 = 60 70 61 71 62 72 63 73
+ "vtrn.u8 d2, d3 \n"
+ "vtrn.u8 d6, d7 \n"
+
+ // d0 = 00+10 01+11 02+12 03+13
+ // d2 = 40+50 41+51 42+52 43+53
+ "vpaddl.u8 q0, q0 \n"
+ "vpaddl.u8 q2, q2 \n"
+
+ // d3 = 60+70 61+71 62+72 63+73
+ "vpaddl.u8 d3, d3 \n"
+ "vpaddl.u8 d7, d7 \n"
+
+ // combine source lines
+ "vadd.u16 q0, q2 \n"
+ "vadd.u16 d4, d3, d7 \n"
+
+ // dst_ptr[3] = (s[6] + s[7] + s[6+st] + s[7+st]) / 4
+ "vqrshrn.u16 d4, q2, #2 \n"
+
+ // Shuffle 2,3 reg around so that 2 can be added to the
+ // 0,1 reg and 3 can be added to the 4,5 reg. This
+ // requires expanding from u8 to u16 as the 0,1 and 4,5
+ // registers are already expanded. Then do transposes
+ // to get aligned.
+ // q2 = xx 20 xx 30 xx 21 xx 31 xx 22 xx 32 xx 23 xx 33
+ "vmovl.u8 q1, d2 \n"
+ "vmovl.u8 q3, d6 \n"
+
+ // combine source lines
+ "vadd.u16 q1, q3 \n"
+
+ // d4 = xx 20 xx 30 xx 22 xx 32
+ // d5 = xx 21 xx 31 xx 23 xx 33
+ "vtrn.u32 d2, d3 \n"
+
+ // d4 = xx 20 xx 21 xx 22 xx 23
+ // d5 = xx 30 xx 31 xx 32 xx 33
+ "vtrn.u16 d2, d3 \n"
+
+ // 0+1+2, 3+4+5
+ "vadd.u16 q0, q1 \n"
+
+    // Need to divide, but can't downshift as the value
+ // isn't a power of 2. So multiply by 65536 / n
+ // and take the upper 16 bits.
+ "vqrdmulh.s16 q0, q0, q13 \n"
+
+ // Align for table lookup, vtbl requires registers to
+ // be adjacent
+ "vmov.u8 d2, d4 \n"
+
+ "vtbl.u8 d3, {d0, d1, d2}, d28 \n"
+ "vtbl.u8 d4, {d0, d1, d2}, d29 \n"
+
+ MEMACCESS(1)
+ "vst1.8 {d3}, [%1]! \n"
+ MEMACCESS(1)
+ "vst1.32 {d4[0]}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(src_stride) // %3
+ : "r"(&kMult38_Div6), // %4
+ "r"(&kShuf38_2) // %5
+ : "q0", "q1", "q2", "q3", "q13", "q14", "memory", "cc"
+ );
+}
+
+void ScaleAddRows_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int src_width, int src_height) {
+ const uint8* src_tmp = NULL;
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ "mov %0, %1 \n"
+ "mov r12, %5 \n"
+ "veor q2, q2, q2 \n"
+ "veor q3, q3, q3 \n"
+ "2: \n"
+ // load 16 pixels into q0
+ MEMACCESS(0)
+ "vld1.8 {q0}, [%0], %3 \n"
+ "vaddw.u8 q3, q3, d1 \n"
+ "vaddw.u8 q2, q2, d0 \n"
+ "subs r12, r12, #1 \n"
+ "bgt 2b \n"
+ MEMACCESS(2)
+ "vst1.16 {q2, q3}, [%2]! \n" // store pixels
+ "add %1, %1, #16 \n"
+ "subs %4, %4, #16 \n" // 16 processed per loop
+ "bgt 1b \n"
+ : "+r"(src_tmp), // %0
+ "+r"(src_ptr), // %1
+ "+r"(dst_ptr), // %2
+ "+r"(src_stride), // %3
+ "+r"(src_width), // %4
+ "+r"(src_height) // %5
+ :
+ : "memory", "cc", "r12", "q0", "q1", "q2", "q3" // Clobber List
+ );
+}
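+
+// Sketch of the kernel above (hypothetical helper for illustration):
+// accumulate src_height rows into 16-bit column sums, for the box
+// scaler to divide by the area afterwards.
+static void ScaleAddRows_C_Sketch(const uint8* src_ptr, ptrdiff_t src_stride,
+                                  uint16* dst_ptr, int src_width,
+                                  int src_height) {
+  int x, y;
+  for (x = 0; x < src_width; ++x) {
+    const uint8* s = src_ptr + x;
+    uint16 sum = 0;
+    for (y = 0; y < src_height; ++y) {
+      sum = (uint16)(sum + *s);  // 16-bit lanes, as in the asm
+      s += src_stride;
+    }
+    dst_ptr[x] = sum;
+  }
+}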
+
+// TODO(Yang Zhang): Investigate fewer load instructions for
+// the x/dx stepping
+#define LOAD2_DATA8_LANE(n) \
+ "lsr %5, %3, #16 \n" \
+ "add %6, %1, %5 \n" \
+ "add %3, %3, %4 \n" \
+ MEMACCESS(6) \
+ "vld2.8 {d6["#n"], d7["#n"]}, [%6] \n"
+
+void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ int dx_offset[4] = {0, 1, 2, 3};
+ int* tmp = dx_offset;
+ const uint8* src_tmp = src_ptr;
+ asm volatile (
+ ".p2align 2 \n"
+ "vdup.32 q0, %3 \n" // x
+ "vdup.32 q1, %4 \n" // dx
+ "vld1.32 {q2}, [%5] \n" // 0 1 2 3
+ "vshl.i32 q3, q1, #2 \n" // 4 * dx
+ "vmul.s32 q1, q1, q2 \n"
+ // x , x + 1 * dx, x + 2 * dx, x + 3 * dx
+ "vadd.s32 q1, q1, q0 \n"
+ // x + 4 * dx, x + 5 * dx, x + 6 * dx, x + 7 * dx
+ "vadd.s32 q2, q1, q3 \n"
+ "vshl.i32 q0, q3, #1 \n" // 8 * dx
+ "1: \n"
+ LOAD2_DATA8_LANE(0)
+ LOAD2_DATA8_LANE(1)
+ LOAD2_DATA8_LANE(2)
+ LOAD2_DATA8_LANE(3)
+ LOAD2_DATA8_LANE(4)
+ LOAD2_DATA8_LANE(5)
+ LOAD2_DATA8_LANE(6)
+ LOAD2_DATA8_LANE(7)
+ "vmov q10, q1 \n"
+ "vmov q11, q2 \n"
+ "vuzp.16 q10, q11 \n"
+ "vmovl.u8 q8, d6 \n"
+ "vmovl.u8 q9, d7 \n"
+ "vsubl.s16 q11, d18, d16 \n"
+ "vsubl.s16 q12, d19, d17 \n"
+ "vmovl.u16 q13, d20 \n"
+ "vmovl.u16 q10, d21 \n"
+ "vmul.s32 q11, q11, q13 \n"
+ "vmul.s32 q12, q12, q10 \n"
+ "vshrn.s32 d18, q11, #16 \n"
+ "vshrn.s32 d19, q12, #16 \n"
+ "vadd.s16 q8, q8, q9 \n"
+ "vmovn.s16 d6, q8 \n"
+
+ MEMACCESS(0)
+ "vst1.8 {d6}, [%0]! \n" // store pixels
+ "vadd.s32 q1, q1, q0 \n"
+ "vadd.s32 q2, q2, q0 \n"
+ "subs %2, %2, #8 \n" // 8 processed per loop
+ "bgt 1b \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(x), // %3
+ "+r"(dx), // %4
+ "+r"(tmp), // %5
+ "+r"(src_tmp) // %6
+ :
+ : "memory", "cc", "q0", "q1", "q2", "q3",
+ "q8", "q9", "q10", "q11", "q12", "q13"
+ );
+}
+
+#undef LOAD2_DATA8_LANE
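+
+// Sketch of the column filtering above (hypothetical helper for
+// illustration): x and dx are 16.16 fixed point. The integer part of x
+// picks the source pair and the fraction blends them, matching the
+// vsubl/vmul/vshrn #16 sequence.
+static void ScaleFilterCols_C_Sketch(uint8* dst_ptr, const uint8* src_ptr,
+                                     int dst_width, int x, int dx) {
+  int j;
+  for (j = 0; j < dst_width; ++j) {
+    int xi = x >> 16;    // integer pixel index
+    int f = x & 0xffff;  // fraction between xi and xi + 1
+    int a = src_ptr[xi];
+    int b = src_ptr[xi + 1];
+    dst_ptr[j] = (uint8)(a + ((b - a) * f >> 16));
+    x += dx;
+  }
+}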
+
+// 16x2 -> 16x1
+void ScaleFilterRows_NEON(uint8* dst_ptr,
+ const uint8* src_ptr, ptrdiff_t src_stride,
+ int dst_width, int source_y_fraction) {
+ asm volatile (
+ "cmp %4, #0 \n"
+ "beq 100f \n"
+ "add %2, %1 \n"
+ "cmp %4, #64 \n"
+ "beq 75f \n"
+ "cmp %4, #128 \n"
+ "beq 50f \n"
+ "cmp %4, #192 \n"
+ "beq 25f \n"
+
+ "vdup.8 d5, %4 \n"
+ "rsb %4, #256 \n"
+ "vdup.8 d4, %4 \n"
+ // General purpose row blend.
+ "1: \n"
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
+ "vld1.8 {q1}, [%2]! \n"
+ "subs %3, %3, #16 \n"
+ "vmull.u8 q13, d0, d4 \n"
+ "vmull.u8 q14, d1, d4 \n"
+ "vmlal.u8 q13, d2, d5 \n"
+ "vmlal.u8 q14, d3, d5 \n"
+ "vrshrn.u16 d0, q13, #8 \n"
+ "vrshrn.u16 d1, q14, #8 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 1b \n"
+ "b 99f \n"
+
+ // Blend 25 / 75.
+ "25: \n"
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
+ "vld1.8 {q1}, [%2]! \n"
+ "subs %3, %3, #16 \n"
+ "vrhadd.u8 q0, q1 \n"
+ "vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 25b \n"
+ "b 99f \n"
+
+ // Blend 50 / 50.
+ "50: \n"
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n"
+ MEMACCESS(2)
+ "vld1.8 {q1}, [%2]! \n"
+ "subs %3, %3, #16 \n"
+ "vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 50b \n"
+ "b 99f \n"
+
+ // Blend 75 / 25.
+ "75: \n"
+ MEMACCESS(1)
+ "vld1.8 {q1}, [%1]! \n"
+ MEMACCESS(2)
+ "vld1.8 {q0}, [%2]! \n"
+ "subs %3, %3, #16 \n"
+ "vrhadd.u8 q0, q1 \n"
+ "vrhadd.u8 q0, q1 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 75b \n"
+ "b 99f \n"
+
+ // Blend 100 / 0 - Copy row unchanged.
+ "100: \n"
+ MEMACCESS(1)
+ "vld1.8 {q0}, [%1]! \n"
+ "subs %3, %3, #16 \n"
+ MEMACCESS(0)
+ "vst1.8 {q0}, [%0]! \n"
+ "bgt 100b \n"
+
+ "99: \n"
+ MEMACCESS(0)
+ "vst1.8 {d1[7]}, [%0] \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(src_stride), // %2
+ "+r"(dst_width), // %3
+ "+r"(source_y_fraction) // %4
+ :
+ : "q0", "q1", "d4", "d5", "q13", "q14", "memory", "cc"
+ );
+}
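+
+// Sketch of the row blend above (hypothetical helper for illustration):
+// with f = source_y_fraction in [0, 256), each output is
+// (a * (256 - f) + b * f + 128) >> 8. f values 0, 64, 128 and 192 take
+// the specialized copy/average paths, and the final vst1.8 {d1[7]}
+// stores one extra byte past the row end replicating the last pixel.
+static void ScaleFilterRows_C_Sketch(uint8* dst_ptr, const uint8* src_ptr,
+                                     ptrdiff_t src_stride, int dst_width,
+                                     int source_y_fraction) {
+  const uint8* s = src_ptr;
+  const uint8* t = src_ptr + src_stride;
+  int f = source_y_fraction;
+  int x;
+  for (x = 0; x < dst_width; ++x) {
+    dst_ptr[x] = (uint8)((s[x] * (256 - f) + t[x] * f + 128) >> 8);
+  }
+  dst_ptr[dst_width] = dst_ptr[dst_width - 1];  // replicate last pixel
+}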
+
+void ScaleARGBRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ // load even pixels into q0, odd into q1
+ MEMACCESS(0)
+ "vld2.32 {q0, q1}, [%0]! \n"
+ MEMACCESS(0)
+ "vld2.32 {q2, q3}, [%0]! \n"
+ "subs %2, %2, #8 \n" // 8 processed per loop
+ MEMACCESS(1)
+ "vst1.8 {q1}, [%1]! \n" // store odd pixels
+ MEMACCESS(1)
+ "vst1.8 {q3}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst), // %1
+ "+r"(dst_width) // %2
+ :
+ : "memory", "cc", "q0", "q1", "q2", "q3" // Clobber List
+ );
+}
+
+void ScaleARGBRowDown2Linear_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels.
+ "subs %2, %2, #8 \n" // 8 processed per loop
+ "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
+ "vpaddl.u8 q3, q3 \n" // A 16 bytes -> 8 shorts.
+ "vrshrn.u16 d0, q0, #1 \n" // downshift, round and pack
+ "vrshrn.u16 d1, q1, #1 \n"
+ "vrshrn.u16 d2, q2, #1 \n"
+ "vrshrn.u16 d3, q3, #1 \n"
+ MEMACCESS(1)
+ "vst4.8 {d0, d1, d2, d3}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(dst_width) // %2
+ :
+ : "memory", "cc", "q0", "q1", "q2", "q3" // Clobber List
+ );
+}
+
+void ScaleARGBRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ // change the stride to row 2 pointer
+ "add %1, %1, %0 \n"
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld4.8 {d0, d2, d4, d6}, [%0]! \n" // load 8 ARGB pixels.
+ MEMACCESS(0)
+ "vld4.8 {d1, d3, d5, d7}, [%0]! \n" // load next 8 ARGB pixels.
+ "subs %3, %3, #8 \n" // 8 processed per loop.
+ "vpaddl.u8 q0, q0 \n" // B 16 bytes -> 8 shorts.
+ "vpaddl.u8 q1, q1 \n" // G 16 bytes -> 8 shorts.
+ "vpaddl.u8 q2, q2 \n" // R 16 bytes -> 8 shorts.
+ "vpaddl.u8 q3, q3 \n" // A 16 bytes -> 8 shorts.
+ MEMACCESS(1)
+ "vld4.8 {d16, d18, d20, d22}, [%1]! \n" // load 8 more ARGB pixels.
+ MEMACCESS(1)
+ "vld4.8 {d17, d19, d21, d23}, [%1]! \n" // load last 8 ARGB pixels.
+ "vpadal.u8 q0, q8 \n" // B 16 bytes -> 8 shorts.
+ "vpadal.u8 q1, q9 \n" // G 16 bytes -> 8 shorts.
+ "vpadal.u8 q2, q10 \n" // R 16 bytes -> 8 shorts.
+ "vpadal.u8 q3, q11 \n" // A 16 bytes -> 8 shorts.
+ "vrshrn.u16 d0, q0, #2 \n" // downshift, round and pack
+ "vrshrn.u16 d1, q1, #2 \n"
+ "vrshrn.u16 d2, q2, #2 \n"
+ "vrshrn.u16 d3, q3, #2 \n"
+ MEMACCESS(2)
+ "vst4.8 {d0, d1, d2, d3}, [%2]! \n"
+ "bgt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(src_stride), // %1
+ "+r"(dst), // %2
+ "+r"(dst_width) // %3
+ :
+ : "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"
+ );
+}
+
+// Reads 4 pixels at a time.
+// Alignment requirement: src_argb 4 byte aligned.
+void ScaleARGBRowDownEven_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx, uint8* dst_argb, int dst_width) {
+ asm volatile (
+ "mov r12, %3, lsl #2 \n"
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.32 {d0[0]}, [%0], r12 \n"
+ MEMACCESS(0)
+ "vld1.32 {d0[1]}, [%0], r12 \n"
+ MEMACCESS(0)
+ "vld1.32 {d1[0]}, [%0], r12 \n"
+ MEMACCESS(0)
+ "vld1.32 {d1[1]}, [%0], r12 \n"
+ "subs %2, %2, #4 \n" // 4 pixels per loop.
+ MEMACCESS(1)
+ "vst1.8 {q0}, [%1]! \n"
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(dst_width) // %2
+ : "r"(src_stepx) // %3
+ : "memory", "cc", "r12", "q0"
+ );
+}
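+
+// Sketch of the kernel above (hypothetical helper for illustration):
+// copy every src_stepx-th ARGB pixel as one 32-bit word (hence the
+// 4-byte alignment requirement).
+static void ScaleARGBRowDownEven_C_Sketch(const uint8* src_argb,
+                                          ptrdiff_t src_stride,
+                                          int src_stepx,
+                                          uint8* dst_argb, int dst_width) {
+  const uint32* src = (const uint32*)(src_argb);
+  uint32* dst = (uint32*)(dst_argb);
+  int x;
+  (void)src_stride;
+  for (x = 0; x < dst_width; ++x) {
+    dst[x] = src[x * src_stepx];
+  }
+}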
+
+// Reads 4 pixels at a time.
+// Alignment requirement: src_argb 4 byte aligned.
+void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width) {
+ asm volatile (
+ "mov r12, %4, lsl #2 \n"
+ "add %1, %1, %0 \n"
+ ".p2align 2 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "vld1.8 {d0}, [%0], r12 \n" // Read 4 2x2 blocks -> 2x1
+ MEMACCESS(1)
+ "vld1.8 {d1}, [%1], r12 \n"
+ MEMACCESS(0)
+ "vld1.8 {d2}, [%0], r12 \n"
+ MEMACCESS(1)
+ "vld1.8 {d3}, [%1], r12 \n"
+ MEMACCESS(0)
+ "vld1.8 {d4}, [%0], r12 \n"
+ MEMACCESS(1)
+ "vld1.8 {d5}, [%1], r12 \n"
+ MEMACCESS(0)
+ "vld1.8 {d6}, [%0], r12 \n"
+ MEMACCESS(1)
+ "vld1.8 {d7}, [%1], r12 \n"
+ "vaddl.u8 q0, d0, d1 \n"
+ "vaddl.u8 q1, d2, d3 \n"
+ "vaddl.u8 q2, d4, d5 \n"
+ "vaddl.u8 q3, d6, d7 \n"
+ "vswp.8 d1, d2 \n" // ab_cd -> ac_bd
+ "vswp.8 d5, d6 \n" // ef_gh -> eg_fh
+ "vadd.u16 q0, q0, q1 \n" // (a+b)_(c+d)
+ "vadd.u16 q2, q2, q3 \n" // (e+f)_(g+h)
+ "vrshrn.u16 d0, q0, #2 \n" // first 2 pixels.
+ "vrshrn.u16 d1, q2, #2 \n" // next 2 pixels.
+ "subs %3, %3, #4 \n" // 4 pixels per loop.
+ MEMACCESS(2)
+ "vst1.8 {q0}, [%2]! \n"
+ "bgt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_stride), // %1
+ "+r"(dst_argb), // %2
+ "+r"(dst_width) // %3
+ : "r"(src_stepx) // %4
+ : "memory", "cc", "r12", "q0", "q1", "q2", "q3"
+ );
+}
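+
+// Sketch of the kernel above (hypothetical helper for illustration):
+// average each 2x2 block of ARGB pixels per channel with rounding,
+// stepping src_stepx source pixels per output.
+static void ScaleARGBRowDownEvenBox_C_Sketch(const uint8* src_argb,
+                                             ptrdiff_t src_stride,
+                                             int src_stepx,
+                                             uint8* dst_argb, int dst_width) {
+  int x, c;
+  for (x = 0; x < dst_width; ++x) {
+    const uint8* p = src_argb + x * src_stepx * 4;  // top-left pixel
+    const uint8* q = p + src_stride;                // pixel below it
+    for (c = 0; c < 4; ++c) {  // B, G, R, A
+      dst_argb[4 * x + c] =
+          (uint8)((p[c] + p[4 + c] + q[c] + q[4 + c] + 2) >> 2);
+    }
+  }
+}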
+
+// TODO(Yang Zhang): Investigate fewer load instructions for
+// the x/dx stepping
+#define LOAD1_DATA32_LANE(dn, n) \
+ "lsr %5, %3, #16 \n" \
+ "add %6, %1, %5, lsl #2 \n" \
+ "add %3, %3, %4 \n" \
+ MEMACCESS(6) \
+ "vld1.32 {"#dn"["#n"]}, [%6] \n"
+
+void ScaleARGBCols_NEON(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ int tmp = 0;
+ const uint8* src_tmp = src_argb;
+ asm volatile (
+ ".p2align 2 \n"
+ "1: \n"
+ LOAD1_DATA32_LANE(d0, 0)
+ LOAD1_DATA32_LANE(d0, 1)
+ LOAD1_DATA32_LANE(d1, 0)
+ LOAD1_DATA32_LANE(d1, 1)
+ LOAD1_DATA32_LANE(d2, 0)
+ LOAD1_DATA32_LANE(d2, 1)
+ LOAD1_DATA32_LANE(d3, 0)
+ LOAD1_DATA32_LANE(d3, 1)
+
+ MEMACCESS(0)
+ "vst1.32 {q0, q1}, [%0]! \n" // store pixels
+ "subs %2, %2, #8 \n" // 8 processed per loop
+ "bgt 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(src_argb), // %1
+ "+r"(dst_width), // %2
+ "+r"(x), // %3
+ "+r"(dx), // %4
+ "+r"(tmp), // %5
+ "+r"(src_tmp) // %6
+ :
+ : "memory", "cc", "q0", "q1"
+ );
+}
+
+#undef LOAD1_DATA32_LANE
+
+// TODO(Yang Zhang): Investigate fewer load instructions for
+// the x/dx stepping
+#define LOAD2_DATA32_LANE(dn1, dn2, n) \
+ "lsr %5, %3, #16 \n" \
+ "add %6, %1, %5, lsl #2 \n" \
+ "add %3, %3, %4 \n" \
+ MEMACCESS(6) \
+ "vld2.32 {"#dn1"["#n"], "#dn2"["#n"]}, [%6] \n"
+
+void ScaleARGBFilterCols_NEON(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ int dx_offset[4] = {0, 1, 2, 3};
+ int* tmp = dx_offset;
+ const uint8* src_tmp = src_argb;
+ asm volatile (
+ ".p2align 2 \n"
+ "vdup.32 q0, %3 \n" // x
+ "vdup.32 q1, %4 \n" // dx
+ "vld1.32 {q2}, [%5] \n" // 0 1 2 3
+ "vshl.i32 q9, q1, #2 \n" // 4 * dx
+ "vmul.s32 q1, q1, q2 \n"
+ "vmov.i8 q3, #0x7f \n" // 0x7F
+ "vmov.i16 q15, #0x7f \n" // 0x7F
+ // x , x + 1 * dx, x + 2 * dx, x + 3 * dx
+ "vadd.s32 q8, q1, q0 \n"
+ "1: \n"
+ // d0, d1: a
+ // d2, d3: b
+ LOAD2_DATA32_LANE(d0, d2, 0)
+ LOAD2_DATA32_LANE(d0, d2, 1)
+ LOAD2_DATA32_LANE(d1, d3, 0)
+ LOAD2_DATA32_LANE(d1, d3, 1)
+ "vshrn.i32 d22, q8, #9 \n"
+ "vand.16 d22, d22, d30 \n"
+ "vdup.8 d24, d22[0] \n"
+ "vdup.8 d25, d22[2] \n"
+ "vdup.8 d26, d22[4] \n"
+ "vdup.8 d27, d22[6] \n"
+ "vext.8 d4, d24, d25, #4 \n"
+ "vext.8 d5, d26, d27, #4 \n" // f
+ "veor.8 q10, q2, q3 \n" // 0x7f ^ f
+ "vmull.u8 q11, d0, d20 \n"
+ "vmull.u8 q12, d1, d21 \n"
+ "vmull.u8 q13, d2, d4 \n"
+ "vmull.u8 q14, d3, d5 \n"
+ "vadd.i16 q11, q11, q13 \n"
+ "vadd.i16 q12, q12, q14 \n"
+ "vshrn.i16 d0, q11, #7 \n"
+ "vshrn.i16 d1, q12, #7 \n"
+
+ MEMACCESS(0)
+ "vst1.32 {d0, d1}, [%0]! \n" // store pixels
+ "vadd.s32 q8, q8, q9 \n"
+ "subs %2, %2, #4 \n" // 4 processed per loop
+ "bgt 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(src_argb), // %1
+ "+r"(dst_width), // %2
+ "+r"(x), // %3
+ "+r"(dx), // %4
+ "+r"(tmp), // %5
+ "+r"(src_tmp) // %6
+ :
+ : "memory", "cc", "q0", "q1", "q2", "q3", "q8", "q9",
+ "q10", "q11", "q12", "q13", "q14", "q15"
+ );
+}
+
+#undef LOAD2_DATA32_LANE
+
+#endif // defined(__ARM_NEON__) && !defined(__aarch64__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/scale_neon64.cc b/media/libaom/src/third_party/libyuv/source/scale_neon64.cc
new file mode 100644
index 000000000..1d5519357
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale_neon64.cc
@@ -0,0 +1,1042 @@
+/*
+ * Copyright 2014 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/scale.h"
+#include "libyuv/row.h"
+#include "libyuv/scale_row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for GCC Neon armv8 64 bit.
+#if !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
+
+// Read 32x1 pixels, throw away the even pixels, and write 16x1.
+void ScaleRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ "1: \n"
+ // load even pixels into v0, odd into v1
+ MEMACCESS(0)
+ "ld2 {v0.16b,v1.16b}, [%0], #32 \n"
+ "subs %w2, %w2, #16 \n" // 16 processed per loop
+ MEMACCESS(1)
+ "st1 {v1.16b}, [%1], #16 \n" // store odd pixels
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst), // %1
+ "+r"(dst_width) // %2
+ :
+ : "v0", "v1" // Clobber List
+ );
+}
+
+// Read 32x1 average down and write 16x1.
+void ScaleRowDown2Linear_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b,v1.16b}, [%0], #32 \n" // load pixels and post inc
+ "subs %w2, %w2, #16 \n" // 16 processed per loop
+ "uaddlp v0.8h, v0.16b \n" // add adjacent
+ "uaddlp v1.8h, v1.16b \n"
+ "rshrn v0.8b, v0.8h, #1 \n" // downshift, round and pack
+ "rshrn2 v0.16b, v1.8h, #1 \n"
+ MEMACCESS(1)
+ "st1 {v0.16b}, [%1], #16 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst), // %1
+ "+r"(dst_width) // %2
+ :
+ : "v0", "v1" // Clobber List
+ );
+}
+
+// Read 32x2 average down and write 16x1.
+void ScaleRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ // change the stride to row 2 pointer
+ "add %1, %1, %0 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b,v1.16b}, [%0], #32 \n" // load row 1 and post inc
+ MEMACCESS(1)
+ "ld1 {v2.16b, v3.16b}, [%1], #32 \n" // load row 2 and post inc
+ "subs %w3, %w3, #16 \n" // 16 processed per loop
+ "uaddlp v0.8h, v0.16b \n" // row 1 add adjacent
+ "uaddlp v1.8h, v1.16b \n"
+ "uadalp v0.8h, v2.16b \n" // row 2 add adjacent + row1
+ "uadalp v1.8h, v3.16b \n"
+ "rshrn v0.8b, v0.8h, #2 \n" // downshift, round and pack
+ "rshrn2 v0.16b, v1.8h, #2 \n"
+ MEMACCESS(2)
+ "st1 {v0.16b}, [%2], #16 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(src_stride), // %1
+ "+r"(dst), // %2
+ "+r"(dst_width) // %3
+ :
+ : "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+
+void ScaleRowDown4_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // src line 0
+ "subs %w2, %w2, #8 \n" // 8 processed per loop
+ MEMACCESS(1)
+ "st1 {v2.8b}, [%1], #8 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ :
+ : "v0", "v1", "v2", "v3", "memory", "cc"
+ );
+}
+
+void ScaleRowDown4Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ const uint8* src_ptr1 = src_ptr + src_stride;
+ const uint8* src_ptr2 = src_ptr + src_stride * 2;
+ const uint8* src_ptr3 = src_ptr + src_stride * 3;
+asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], #16 \n" // load up 16x4
+ MEMACCESS(3)
+ "ld1 {v1.16b}, [%2], #16 \n"
+ MEMACCESS(4)
+ "ld1 {v2.16b}, [%3], #16 \n"
+ MEMACCESS(5)
+ "ld1 {v3.16b}, [%4], #16 \n"
+ "subs %w5, %w5, #4 \n"
+ "uaddlp v0.8h, v0.16b \n"
+ "uadalp v0.8h, v1.16b \n"
+ "uadalp v0.8h, v2.16b \n"
+ "uadalp v0.8h, v3.16b \n"
+ "addp v0.8h, v0.8h, v0.8h \n"
+ "rshrn v0.8b, v0.8h, #4 \n" // divide by 16 w/rounding
+ MEMACCESS(1)
+ "st1 {v0.s}[0], [%1], #4 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(src_ptr1), // %2
+ "+r"(src_ptr2), // %3
+ "+r"(src_ptr3), // %4
+ "+r"(dst_width) // %5
+ :
+ : "v0", "v1", "v2", "v3", "memory", "cc"
+ );
+}
+
+// Down scale from 4 to 3 pixels. Use the neon multilane read/write
+// to deinterleave groups of 4 pixels into 4 different registers.
+// Point samples 32 pixels to 24 pixels.
+void ScaleRowDown34_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // src line 0
+ "subs %w2, %w2, #24 \n"
+ "orr v2.16b, v3.16b, v3.16b \n" // order v0, v1, v2
+ MEMACCESS(1)
+ "st3 {v0.8b,v1.8b,v2.8b}, [%1], #24 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ :
+ : "v0", "v1", "v2", "v3", "memory", "cc"
+ );
+}
+
+void ScaleRowDown34_0_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "movi v20.8b, #3 \n"
+ "add %3, %3, %0 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // src line 0
+ MEMACCESS(3)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%3], #32 \n" // src line 1
+ "subs %w2, %w2, #24 \n"
+
+ // filter src line 0 with src line 1
+ // expand chars to shorts to allow for room
+ // when adding lines together
+ "ushll v16.8h, v4.8b, #0 \n"
+ "ushll v17.8h, v5.8b, #0 \n"
+ "ushll v18.8h, v6.8b, #0 \n"
+ "ushll v19.8h, v7.8b, #0 \n"
+
+ // 3 * line_0 + line_1
+ "umlal v16.8h, v0.8b, v20.8b \n"
+ "umlal v17.8h, v1.8b, v20.8b \n"
+ "umlal v18.8h, v2.8b, v20.8b \n"
+ "umlal v19.8h, v3.8b, v20.8b \n"
+
+ // (3 * line_0 + line_1) >> 2
+ "uqrshrn v0.8b, v16.8h, #2 \n"
+ "uqrshrn v1.8b, v17.8h, #2 \n"
+ "uqrshrn v2.8b, v18.8h, #2 \n"
+ "uqrshrn v3.8b, v19.8h, #2 \n"
+
+ // a0 = (src[0] * 3 + s[1] * 1) >> 2
+ "ushll v16.8h, v1.8b, #0 \n"
+ "umlal v16.8h, v0.8b, v20.8b \n"
+ "uqrshrn v0.8b, v16.8h, #2 \n"
+
+ // a1 = (src[1] * 1 + s[2] * 1) >> 1
+ "urhadd v1.8b, v1.8b, v2.8b \n"
+
+ // a2 = (src[2] * 1 + s[3] * 3) >> 2
+ "ushll v16.8h, v2.8b, #0 \n"
+ "umlal v16.8h, v3.8b, v20.8b \n"
+ "uqrshrn v2.8b, v16.8h, #2 \n"
+
+ MEMACCESS(1)
+ "st3 {v0.8b,v1.8b,v2.8b}, [%1], #24 \n"
+
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(src_stride) // %3
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19",
+ "v20", "memory", "cc"
+ );
+}
+
+void ScaleRowDown34_1_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ "movi v20.8b, #3 \n"
+ "add %3, %3, %0 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n" // src line 0
+ MEMACCESS(3)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%3], #32 \n" // src line 1
+ "subs %w2, %w2, #24 \n"
+ // average src line 0 with src line 1
+ "urhadd v0.8b, v0.8b, v4.8b \n"
+ "urhadd v1.8b, v1.8b, v5.8b \n"
+ "urhadd v2.8b, v2.8b, v6.8b \n"
+ "urhadd v3.8b, v3.8b, v7.8b \n"
+
+ // a0 = (src[0] * 3 + s[1] * 1) >> 2
+ "ushll v4.8h, v1.8b, #0 \n"
+ "umlal v4.8h, v0.8b, v20.8b \n"
+ "uqrshrn v0.8b, v4.8h, #2 \n"
+
+ // a1 = (src[1] * 1 + s[2] * 1) >> 1
+ "urhadd v1.8b, v1.8b, v2.8b \n"
+
+ // a2 = (src[2] * 1 + s[3] * 3) >> 2
+ "ushll v4.8h, v2.8b, #0 \n"
+ "umlal v4.8h, v3.8b, v20.8b \n"
+ "uqrshrn v2.8b, v4.8h, #2 \n"
+
+ MEMACCESS(1)
+ "st3 {v0.8b,v1.8b,v2.8b}, [%1], #24 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width), // %2
+ "+r"(src_stride) // %3
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v20", "memory", "cc"
+ );
+}
+
+static uvec8 kShuf38 =
+ { 0, 3, 6, 8, 11, 14, 16, 19, 22, 24, 27, 30, 0, 0, 0, 0 };
+static uvec8 kShuf38_2 =
+ { 0, 16, 32, 2, 18, 33, 4, 20, 34, 6, 22, 35, 0, 0, 0, 0 };
+static vec16 kMult38_Div6 =
+ { 65536 / 12, 65536 / 12, 65536 / 12, 65536 / 12,
+ 65536 / 12, 65536 / 12, 65536 / 12, 65536 / 12 };
+static vec16 kMult38_Div9 =
+ { 65536 / 18, 65536 / 18, 65536 / 18, 65536 / 18,
+ 65536 / 18, 65536 / 18, 65536 / 18, 65536 / 18 };
+
+// 32 -> 12
+void ScaleRowDown38_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ asm volatile (
+ MEMACCESS(3)
+ "ld1 {v3.16b}, [%3] \n"
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.16b,v1.16b}, [%0], #32 \n"
+ "subs %w2, %w2, #12 \n"
+ "tbl v2.16b, {v0.16b,v1.16b}, v3.16b \n"
+ MEMACCESS(1)
+ "st1 {v2.8b}, [%1], #8 \n"
+ MEMACCESS(1)
+ "st1 {v2.s}[2], [%1], #4 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(dst_width) // %2
+ : "r"(&kShuf38) // %3
+ : "v0", "v1", "v2", "v3", "memory", "cc"
+ );
+}
+
+// 32x3 -> 12x1
+void OMITFP ScaleRowDown38_3_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ const uint8* src_ptr1 = src_ptr + src_stride * 2;
+ ptrdiff_t tmp_src_stride = src_stride;
+
+ asm volatile (
+ MEMACCESS(5)
+ "ld1 {v29.8h}, [%5] \n"
+ MEMACCESS(6)
+ "ld1 {v30.16b}, [%6] \n"
+ MEMACCESS(7)
+ "ld1 {v31.8h}, [%7] \n"
+ "add %2, %2, %0 \n"
+ "1: \n"
+
+ // 00 40 01 41 02 42 03 43
+ // 10 50 11 51 12 52 13 53
+ // 20 60 21 61 22 62 23 63
+ // 30 70 31 71 32 72 33 73
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n"
+ MEMACCESS(3)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%2], #32 \n"
+ MEMACCESS(4)
+ "ld4 {v16.8b,v17.8b,v18.8b,v19.8b}, [%3], #32 \n"
+ "subs %w4, %w4, #12 \n"
+
+    // Shuffle the input data around to align the data
+ // so adjacent data can be added. 0,1 - 2,3 - 4,5 - 6,7
+ // 00 10 01 11 02 12 03 13
+ // 40 50 41 51 42 52 43 53
+ "trn1 v20.8b, v0.8b, v1.8b \n"
+ "trn2 v21.8b, v0.8b, v1.8b \n"
+ "trn1 v22.8b, v4.8b, v5.8b \n"
+ "trn2 v23.8b, v4.8b, v5.8b \n"
+ "trn1 v24.8b, v16.8b, v17.8b \n"
+ "trn2 v25.8b, v16.8b, v17.8b \n"
+
+ // 20 30 21 31 22 32 23 33
+ // 60 70 61 71 62 72 63 73
+ "trn1 v0.8b, v2.8b, v3.8b \n"
+ "trn2 v1.8b, v2.8b, v3.8b \n"
+ "trn1 v4.8b, v6.8b, v7.8b \n"
+ "trn2 v5.8b, v6.8b, v7.8b \n"
+ "trn1 v16.8b, v18.8b, v19.8b \n"
+ "trn2 v17.8b, v18.8b, v19.8b \n"
+
+ // 00+10 01+11 02+12 03+13
+ // 40+50 41+51 42+52 43+53
+ "uaddlp v20.4h, v20.8b \n"
+ "uaddlp v21.4h, v21.8b \n"
+ "uaddlp v22.4h, v22.8b \n"
+ "uaddlp v23.4h, v23.8b \n"
+ "uaddlp v24.4h, v24.8b \n"
+ "uaddlp v25.4h, v25.8b \n"
+
+ // 60+70 61+71 62+72 63+73
+ "uaddlp v1.4h, v1.8b \n"
+ "uaddlp v5.4h, v5.8b \n"
+ "uaddlp v17.4h, v17.8b \n"
+
+ // combine source lines
+ "add v20.4h, v20.4h, v22.4h \n"
+ "add v21.4h, v21.4h, v23.4h \n"
+ "add v20.4h, v20.4h, v24.4h \n"
+ "add v21.4h, v21.4h, v25.4h \n"
+ "add v2.4h, v1.4h, v5.4h \n"
+ "add v2.4h, v2.4h, v17.4h \n"
+
+ // dst_ptr[3] = (s[6 + st * 0] + s[7 + st * 0]
+ // + s[6 + st * 1] + s[7 + st * 1]
+ // + s[6 + st * 2] + s[7 + st * 2]) / 6
+ "sqrdmulh v2.8h, v2.8h, v29.8h \n"
+ "xtn v2.8b, v2.8h \n"
+
+ // Shuffle 2,3 reg around so that 2 can be added to the
+ // 0,1 reg and 3 can be added to the 4,5 reg. This
+ // requires expanding from u8 to u16 as the 0,1 and 4,5
+ // registers are already expanded. Then do transposes
+ // to get aligned.
+ // xx 20 xx 30 xx 21 xx 31 xx 22 xx 32 xx 23 xx 33
+ "ushll v16.8h, v16.8b, #0 \n"
+ "uaddl v0.8h, v0.8b, v4.8b \n"
+
+ // combine source lines
+ "add v0.8h, v0.8h, v16.8h \n"
+
+ // xx 20 xx 21 xx 22 xx 23
+ // xx 30 xx 31 xx 32 xx 33
+ "trn1 v1.8h, v0.8h, v0.8h \n"
+ "trn2 v4.8h, v0.8h, v0.8h \n"
+ "xtn v0.4h, v1.4s \n"
+ "xtn v4.4h, v4.4s \n"
+
+ // 0+1+2, 3+4+5
+ "add v20.8h, v20.8h, v0.8h \n"
+ "add v21.8h, v21.8h, v4.8h \n"
+
+    // Need to divide, but can't downshift as the value
+ // isn't a power of 2. So multiply by 65536 / n
+ // and take the upper 16 bits.
+ "sqrdmulh v0.8h, v20.8h, v31.8h \n"
+ "sqrdmulh v1.8h, v21.8h, v31.8h \n"
+
+ // Align for table lookup, vtbl requires registers to
+ // be adjacent
+ "tbl v3.16b, {v0.16b, v1.16b, v2.16b}, v30.16b \n"
+
+ MEMACCESS(1)
+ "st1 {v3.8b}, [%1], #8 \n"
+ MEMACCESS(1)
+ "st1 {v3.s}[2], [%1], #4 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(tmp_src_stride), // %2
+ "+r"(src_ptr1), // %3
+ "+r"(dst_width) // %4
+ : "r"(&kMult38_Div6), // %5
+ "r"(&kShuf38_2), // %6
+ "r"(&kMult38_Div9) // %7
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17",
+ "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v29",
+ "v30", "v31", "memory", "cc"
+ );
+}
+
+// 32x2 -> 12x1
+void ScaleRowDown38_2_Box_NEON(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ // TODO(fbarchard): use src_stride directly for clang 3.5+.
+ ptrdiff_t tmp_src_stride = src_stride;
+ asm volatile (
+ MEMACCESS(4)
+ "ld1 {v30.8h}, [%4] \n"
+ MEMACCESS(5)
+ "ld1 {v31.16b}, [%5] \n"
+ "add %2, %2, %0 \n"
+ "1: \n"
+
+ // 00 40 01 41 02 42 03 43
+ // 10 50 11 51 12 52 13 53
+ // 20 60 21 61 22 62 23 63
+ // 30 70 31 71 32 72 33 73
+ MEMACCESS(0)
+ "ld4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%0], #32 \n"
+ MEMACCESS(3)
+ "ld4 {v4.8b,v5.8b,v6.8b,v7.8b}, [%2], #32 \n"
+ "subs %w3, %w3, #12 \n"
+
+    // Shuffle the input data around to align the data
+ // so adjacent data can be added. 0,1 - 2,3 - 4,5 - 6,7
+ // 00 10 01 11 02 12 03 13
+ // 40 50 41 51 42 52 43 53
+ "trn1 v16.8b, v0.8b, v1.8b \n"
+ "trn2 v17.8b, v0.8b, v1.8b \n"
+ "trn1 v18.8b, v4.8b, v5.8b \n"
+ "trn2 v19.8b, v4.8b, v5.8b \n"
+
+ // 20 30 21 31 22 32 23 33
+ // 60 70 61 71 62 72 63 73
+ "trn1 v0.8b, v2.8b, v3.8b \n"
+ "trn2 v1.8b, v2.8b, v3.8b \n"
+ "trn1 v4.8b, v6.8b, v7.8b \n"
+ "trn2 v5.8b, v6.8b, v7.8b \n"
+
+ // 00+10 01+11 02+12 03+13
+ // 40+50 41+51 42+52 43+53
+ "uaddlp v16.4h, v16.8b \n"
+ "uaddlp v17.4h, v17.8b \n"
+ "uaddlp v18.4h, v18.8b \n"
+ "uaddlp v19.4h, v19.8b \n"
+
+ // 60+70 61+71 62+72 63+73
+ "uaddlp v1.4h, v1.8b \n"
+ "uaddlp v5.4h, v5.8b \n"
+
+ // combine source lines
+ "add v16.4h, v16.4h, v18.4h \n"
+ "add v17.4h, v17.4h, v19.4h \n"
+ "add v2.4h, v1.4h, v5.4h \n"
+
+ // dst_ptr[3] = (s[6] + s[7] + s[6+st] + s[7+st]) / 4
+ "uqrshrn v2.8b, v2.8h, #2 \n"
+
+ // Shuffle 2,3 reg around so that 2 can be added to the
+ // 0,1 reg and 3 can be added to the 4,5 reg. This
+ // requires expanding from u8 to u16 as the 0,1 and 4,5
+ // registers are already expanded. Then do transposes
+ // to get aligned.
+ // xx 20 xx 30 xx 21 xx 31 xx 22 xx 32 xx 23 xx 33
+
+ // combine source lines
+ "uaddl v0.8h, v0.8b, v4.8b \n"
+
+ // xx 20 xx 21 xx 22 xx 23
+ // xx 30 xx 31 xx 32 xx 33
+ "trn1 v1.8h, v0.8h, v0.8h \n"
+ "trn2 v4.8h, v0.8h, v0.8h \n"
+ "xtn v0.4h, v1.4s \n"
+ "xtn v4.4h, v4.4s \n"
+
+ // 0+1+2, 3+4+5
+ "add v16.8h, v16.8h, v0.8h \n"
+ "add v17.8h, v17.8h, v4.8h \n"
+
+    // Need to divide, but can't downshift as the value
+ // isn't a power of 2. So multiply by 65536 / n
+ // and take the upper 16 bits.
+ "sqrdmulh v0.8h, v16.8h, v30.8h \n"
+ "sqrdmulh v1.8h, v17.8h, v30.8h \n"
+
+ // Align for table lookup, vtbl requires registers to
+ // be adjacent
+
+ "tbl v3.16b, {v0.16b, v1.16b, v2.16b}, v31.16b \n"
+
+ MEMACCESS(1)
+ "st1 {v3.8b}, [%1], #8 \n"
+ MEMACCESS(1)
+ "st1 {v3.s}[2], [%1], #4 \n"
+ "b.gt 1b \n"
+ : "+r"(src_ptr), // %0
+ "+r"(dst_ptr), // %1
+ "+r"(tmp_src_stride), // %2
+ "+r"(dst_width) // %3
+ : "r"(&kMult38_Div6), // %4
+ "r"(&kShuf38_2) // %5
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17",
+ "v18", "v19", "v30", "v31", "memory", "cc"
+ );
+}
+
+void ScaleAddRows_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint16* dst_ptr, int src_width, int src_height) {
+ const uint8* src_tmp = NULL;
+ asm volatile (
+ "1: \n"
+ "mov %0, %1 \n"
+ "mov w12, %w5 \n"
+ "eor v2.16b, v2.16b, v2.16b \n"
+ "eor v3.16b, v3.16b, v3.16b \n"
+ "2: \n"
+ // load 16 pixels into q0
+ MEMACCESS(0)
+ "ld1 {v0.16b}, [%0], %3 \n"
+ "uaddw2 v3.8h, v3.8h, v0.16b \n"
+ "uaddw v2.8h, v2.8h, v0.8b \n"
+ "subs w12, w12, #1 \n"
+ "b.gt 2b \n"
+ MEMACCESS(2)
+ "st1 {v2.8h, v3.8h}, [%2], #32 \n" // store pixels
+ "add %1, %1, #16 \n"
+ "subs %w4, %w4, #16 \n" // 16 processed per loop
+ "b.gt 1b \n"
+ : "+r"(src_tmp), // %0
+ "+r"(src_ptr), // %1
+ "+r"(dst_ptr), // %2
+ "+r"(src_stride), // %3
+ "+r"(src_width), // %4
+ "+r"(src_height) // %5
+ :
+ : "memory", "cc", "w12", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+
+// TODO(Yang Zhang): Investigate fewer load instructions for
+// the x/dx stepping
+#define LOAD2_DATA8_LANE(n) \
+ "lsr %5, %3, #16 \n" \
+ "add %6, %1, %5 \n" \
+ "add %3, %3, %4 \n" \
+ MEMACCESS(6) \
+ "ld2 {v4.b, v5.b}["#n"], [%6] \n"
+
+void ScaleFilterCols_NEON(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ int dx_offset[4] = {0, 1, 2, 3};
+ int* tmp = dx_offset;
+ const uint8* src_tmp = src_ptr;
+ int64 dst_width64 = (int64) dst_width; // Work around ios 64 bit warning.
+ int64 x64 = (int64) x;
+ int64 dx64 = (int64) dx;
+ asm volatile (
+ "dup v0.4s, %w3 \n" // x
+ "dup v1.4s, %w4 \n" // dx
+ "ld1 {v2.4s}, [%5] \n" // 0 1 2 3
+ "shl v3.4s, v1.4s, #2 \n" // 4 * dx
+ "mul v1.4s, v1.4s, v2.4s \n"
+ // x , x + 1 * dx, x + 2 * dx, x + 3 * dx
+ "add v1.4s, v1.4s, v0.4s \n"
+ // x + 4 * dx, x + 5 * dx, x + 6 * dx, x + 7 * dx
+ "add v2.4s, v1.4s, v3.4s \n"
+ "shl v0.4s, v3.4s, #1 \n" // 8 * dx
+ "1: \n"
+ LOAD2_DATA8_LANE(0)
+ LOAD2_DATA8_LANE(1)
+ LOAD2_DATA8_LANE(2)
+ LOAD2_DATA8_LANE(3)
+ LOAD2_DATA8_LANE(4)
+ LOAD2_DATA8_LANE(5)
+ LOAD2_DATA8_LANE(6)
+ LOAD2_DATA8_LANE(7)
+ "mov v6.16b, v1.16b \n"
+ "mov v7.16b, v2.16b \n"
+ "uzp1 v6.8h, v6.8h, v7.8h \n"
+ "ushll v4.8h, v4.8b, #0 \n"
+ "ushll v5.8h, v5.8b, #0 \n"
+ "ssubl v16.4s, v5.4h, v4.4h \n"
+ "ssubl2 v17.4s, v5.8h, v4.8h \n"
+ "ushll v7.4s, v6.4h, #0 \n"
+ "ushll2 v6.4s, v6.8h, #0 \n"
+ "mul v16.4s, v16.4s, v7.4s \n"
+ "mul v17.4s, v17.4s, v6.4s \n"
+ "shrn v6.4h, v16.4s, #16 \n"
+ "shrn2 v6.8h, v17.4s, #16 \n"
+ "add v4.8h, v4.8h, v6.8h \n"
+ "xtn v4.8b, v4.8h \n"
+
+ MEMACCESS(0)
+ "st1 {v4.8b}, [%0], #8 \n" // store pixels
+ "add v1.4s, v1.4s, v0.4s \n"
+ "add v2.4s, v2.4s, v0.4s \n"
+ "subs %w2, %w2, #8 \n" // 8 processed per loop
+ "b.gt 1b \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(dst_width64), // %2
+ "+r"(x64), // %3
+ "+r"(dx64), // %4
+ "+r"(tmp), // %5
+ "+r"(src_tmp) // %6
+ :
+ : "memory", "cc", "v0", "v1", "v2", "v3",
+ "v4", "v5", "v6", "v7", "v16", "v17"
+ );
+}
+
+#undef LOAD2_DATA8_LANE
+
+// 16x2 -> 16x1
+void ScaleFilterRows_NEON(uint8* dst_ptr,
+ const uint8* src_ptr, ptrdiff_t src_stride,
+ int dst_width, int source_y_fraction) {
+ int y_fraction = 256 - source_y_fraction;
+ asm volatile (
+ "cmp %w4, #0 \n"
+ "b.eq 100f \n"
+ "add %2, %2, %1 \n"
+ "cmp %w4, #64 \n"
+ "b.eq 75f \n"
+ "cmp %w4, #128 \n"
+ "b.eq 50f \n"
+ "cmp %w4, #192 \n"
+ "b.eq 25f \n"
+
+ "dup v5.8b, %w4 \n"
+ "dup v4.8b, %w5 \n"
+ // General purpose row blend.
+ "1: \n"
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n"
+ MEMACCESS(2)
+ "ld1 {v1.16b}, [%2], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ "umull v6.8h, v0.8b, v4.8b \n"
+ "umull2 v7.8h, v0.16b, v4.16b \n"
+ "umlal v6.8h, v1.8b, v5.8b \n"
+ "umlal2 v7.8h, v1.16b, v5.16b \n"
+ "rshrn v0.8b, v6.8h, #8 \n"
+ "rshrn2 v0.16b, v7.8h, #8 \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 1b \n"
+ "b 99f \n"
+
+ // Blend 25 / 75.
+ "25: \n"
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n"
+ MEMACCESS(2)
+ "ld1 {v1.16b}, [%2], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 25b \n"
+ "b 99f \n"
+
+ // Blend 50 / 50.
+ "50: \n"
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n"
+ MEMACCESS(2)
+ "ld1 {v1.16b}, [%2], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 50b \n"
+ "b 99f \n"
+
+ // Blend 75 / 25.
+ "75: \n"
+ MEMACCESS(1)
+ "ld1 {v1.16b}, [%1], #16 \n"
+ MEMACCESS(2)
+ "ld1 {v0.16b}, [%2], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ "urhadd v0.16b, v0.16b, v1.16b \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 75b \n"
+ "b 99f \n"
+
+ // Blend 100 / 0 - Copy row unchanged.
+ "100: \n"
+ MEMACCESS(1)
+ "ld1 {v0.16b}, [%1], #16 \n"
+ "subs %w3, %w3, #16 \n"
+ MEMACCESS(0)
+ "st1 {v0.16b}, [%0], #16 \n"
+ "b.gt 100b \n"
+
+ "99: \n"
+ MEMACCESS(0)
+ "st1 {v0.b}[15], [%0] \n"
+ : "+r"(dst_ptr), // %0
+ "+r"(src_ptr), // %1
+ "+r"(src_stride), // %2
+ "+r"(dst_width), // %3
+ "+r"(source_y_fraction),// %4
+ "+r"(y_fraction) // %5
+ :
+ : "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "memory", "cc"
+ );
+}
+
+void ScaleARGBRowDown2_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ "1: \n"
+ // load even pixels into q0, odd into q1
+ MEMACCESS (0)
+ "ld2 {v0.4s, v1.4s}, [%0], #32 \n"
+ MEMACCESS (0)
+ "ld2 {v2.4s, v3.4s}, [%0], #32 \n"
+ "subs %w2, %w2, #8 \n" // 8 processed per loop
+ MEMACCESS (1)
+ "st1 {v1.16b}, [%1], #16 \n" // store odd pixels
+ MEMACCESS (1)
+ "st1 {v3.16b}, [%1], #16 \n"
+ "b.gt 1b \n"
+ : "+r" (src_ptr), // %0
+ "+r" (dst), // %1
+ "+r" (dst_width) // %2
+ :
+ : "memory", "cc", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+
+void ScaleARGBRowDown2Linear_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS (0)
+ // load 8 ARGB pixels.
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n"
+ "subs %w2, %w2, #8 \n" // 8 processed per loop.
+ "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts.
+ "uaddlp v3.8h, v3.16b \n" // A 16 bytes -> 8 shorts.
+ "rshrn v0.8b, v0.8h, #1 \n" // downshift, round and pack
+ "rshrn v1.8b, v1.8h, #1 \n"
+ "rshrn v2.8b, v2.8h, #1 \n"
+ "rshrn v3.8b, v3.8h, #1 \n"
+ MEMACCESS (1)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%1], #32 \n"
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(dst_width) // %2
+ :
+ : "memory", "cc", "v0", "v1", "v2", "v3" // Clobber List
+ );
+}
+
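+// 2x2 box filter over ARGB: per channel, each output is the rounded average
+// of a 2x2 block, i.e. roughly (p00 + p01 + p10 + p11 + 2) >> 2.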
+void ScaleARGBRowDown2Box_NEON(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst, int dst_width) {
+ asm volatile (
+ // change the stride to row 2 pointer
+ "add %1, %1, %0 \n"
+ "1: \n"
+ MEMACCESS (0)
+ "ld4 {v0.16b,v1.16b,v2.16b,v3.16b}, [%0], #64 \n" // load 8 ARGB pixels.
+ "subs %w3, %w3, #8 \n" // 8 processed per loop.
+ "uaddlp v0.8h, v0.16b \n" // B 16 bytes -> 8 shorts.
+ "uaddlp v1.8h, v1.16b \n" // G 16 bytes -> 8 shorts.
+ "uaddlp v2.8h, v2.16b \n" // R 16 bytes -> 8 shorts.
+ "uaddlp v3.8h, v3.16b \n" // A 16 bytes -> 8 shorts.
+ MEMACCESS (1)
+ "ld4 {v16.16b,v17.16b,v18.16b,v19.16b}, [%1], #64 \n" // load 8 more ARGB pixels.
+ "uadalp v0.8h, v16.16b \n" // B 16 bytes -> 8 shorts.
+ "uadalp v1.8h, v17.16b \n" // G 16 bytes -> 8 shorts.
+ "uadalp v2.8h, v18.16b \n" // R 16 bytes -> 8 shorts.
+ "uadalp v3.8h, v19.16b \n" // A 16 bytes -> 8 shorts.
+ "rshrn v0.8b, v0.8h, #2 \n" // downshift, round and pack
+ "rshrn v1.8b, v1.8h, #2 \n"
+ "rshrn v2.8b, v2.8h, #2 \n"
+ "rshrn v3.8b, v3.8h, #2 \n"
+ MEMACCESS (2)
+ "st4 {v0.8b,v1.8b,v2.8b,v3.8b}, [%2], #32 \n"
+ "b.gt 1b \n"
+ : "+r" (src_ptr), // %0
+ "+r" (src_stride), // %1
+ "+r" (dst), // %2
+ "+r" (dst_width) // %3
+ :
+ : "memory", "cc", "v0", "v1", "v2", "v3", "v16", "v17", "v18", "v19"
+ );
+}
+
+// Reads 4 pixels at a time.
+// Alignment requirement: src_argb 4 byte aligned.
+void ScaleARGBRowDownEven_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx, uint8* dst_argb, int dst_width) {
+ asm volatile (
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.s}[0], [%0], %3 \n"
+ MEMACCESS(0)
+ "ld1 {v0.s}[1], [%0], %3 \n"
+ MEMACCESS(0)
+ "ld1 {v0.s}[2], [%0], %3 \n"
+ MEMACCESS(0)
+ "ld1 {v0.s}[3], [%0], %3 \n"
+ "subs %w2, %w2, #4 \n" // 4 pixels per loop.
+ MEMACCESS(1)
+ "st1 {v0.16b}, [%1], #16 \n"
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(dst_argb), // %1
+ "+r"(dst_width) // %2
+ : "r"((int64)(src_stepx * 4)) // %3
+ : "memory", "cc", "v0"
+ );
+}
+
+// Reads 4 pixels at a time.
+// Alignment requirement: src_argb 4 byte aligned.
+// TODO(Yang Zhang): Might be worth another optimization pass in future.
+// It could be upgraded to 8 pixels at a time to start with.
+void ScaleARGBRowDownEvenBox_NEON(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width) {
+ asm volatile (
+ "add %1, %1, %0 \n"
+ "1: \n"
+ MEMACCESS(0)
+ "ld1 {v0.8b}, [%0], %4 \n" // Read 4 2x2 blocks -> 2x1
+ MEMACCESS(1)
+ "ld1 {v1.8b}, [%1], %4 \n"
+ MEMACCESS(0)
+ "ld1 {v2.8b}, [%0], %4 \n"
+ MEMACCESS(1)
+ "ld1 {v3.8b}, [%1], %4 \n"
+ MEMACCESS(0)
+ "ld1 {v4.8b}, [%0], %4 \n"
+ MEMACCESS(1)
+ "ld1 {v5.8b}, [%1], %4 \n"
+ MEMACCESS(0)
+ "ld1 {v6.8b}, [%0], %4 \n"
+ MEMACCESS(1)
+ "ld1 {v7.8b}, [%1], %4 \n"
+ "uaddl v0.8h, v0.8b, v1.8b \n"
+ "uaddl v2.8h, v2.8b, v3.8b \n"
+ "uaddl v4.8h, v4.8b, v5.8b \n"
+ "uaddl v6.8h, v6.8b, v7.8b \n"
+ "mov v16.d[1], v0.d[1] \n" // ab_cd -> ac_bd
+ "mov v0.d[1], v2.d[0] \n"
+ "mov v2.d[0], v16.d[1] \n"
+ "mov v16.d[1], v4.d[1] \n" // ef_gh -> eg_fh
+ "mov v4.d[1], v6.d[0] \n"
+ "mov v6.d[0], v16.d[1] \n"
+ "add v0.8h, v0.8h, v2.8h \n" // (a+b)_(c+d)
+ "add v4.8h, v4.8h, v6.8h \n" // (e+f)_(g+h)
+ "rshrn v0.8b, v0.8h, #2 \n" // first 2 pixels.
+ "rshrn2 v0.16b, v4.8h, #2 \n" // next 2 pixels.
+ "subs %w3, %w3, #4 \n" // 4 pixels per loop.
+ MEMACCESS(2)
+ "st1 {v0.16b}, [%2], #16 \n"
+ "b.gt 1b \n"
+ : "+r"(src_argb), // %0
+ "+r"(src_stride), // %1
+ "+r"(dst_argb), // %2
+ "+r"(dst_width) // %3
+ : "r"((int64)(src_stepx * 4)) // %4
+ : "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16"
+ );
+}
+
+// TODO(Yang Zhang): Investigate less load instructions for
+// the x/dx stepping
+#define LOAD1_DATA32_LANE(vn, n) \
+ "lsr %5, %3, #16 \n" \
+ "add %6, %1, %5, lsl #2 \n" \
+ "add %3, %3, %4 \n" \
+ MEMACCESS(6) \
+ "ld1 {"#vn".s}["#n"], [%6] \n"
+
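+// x and dx are 16.16 fixed point. Each lane load above amounts to the
+// scalar statement (illustrative):
+//   dst[i] = ((const uint32*)src_argb)[x >> 16];  x += dx;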
+void ScaleARGBCols_NEON(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ const uint8* src_tmp = src_argb;
+  int64 dst_width64 = (int64) dst_width;  // Work around iOS 64-bit warning.
+ int64 x64 = (int64) x;
+ int64 dx64 = (int64) dx;
+ int64 tmp64 = 0;
+ asm volatile (
+ "1: \n"
+ LOAD1_DATA32_LANE(v0, 0)
+ LOAD1_DATA32_LANE(v0, 1)
+ LOAD1_DATA32_LANE(v0, 2)
+ LOAD1_DATA32_LANE(v0, 3)
+ LOAD1_DATA32_LANE(v1, 0)
+ LOAD1_DATA32_LANE(v1, 1)
+ LOAD1_DATA32_LANE(v1, 2)
+ LOAD1_DATA32_LANE(v1, 3)
+
+ MEMACCESS(0)
+ "st1 {v0.4s, v1.4s}, [%0], #32 \n" // store pixels
+ "subs %w2, %w2, #8 \n" // 8 processed per loop
+ "b.gt 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(src_argb), // %1
+ "+r"(dst_width64), // %2
+ "+r"(x64), // %3
+ "+r"(dx64), // %4
+ "+r"(tmp64), // %5
+ "+r"(src_tmp) // %6
+ :
+ : "memory", "cc", "v0", "v1"
+ );
+}
+
+#undef LOAD1_DATA32_LANE
+
+// TODO(Yang Zhang): Investigate less load instructions for
+// the x/dx stepping
+#define LOAD2_DATA32_LANE(vn1, vn2, n) \
+ "lsr %5, %3, #16 \n" \
+ "add %6, %1, %5, lsl #2 \n" \
+ "add %3, %3, %4 \n" \
+ MEMACCESS(6) \
+ "ld2 {"#vn1".s, "#vn2".s}["#n"], [%6] \n"
+
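+// Bilinear horizontal filter: with the 7-bit fraction f = (x >> 9) & 0x7f,
+// each output pixel is roughly (a * (127 - f) + b * f) >> 7, where a and b
+// are adjacent source pixels; the eor with 0x7f below forms 127 - f.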
+void ScaleARGBFilterCols_NEON(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ int dx_offset[4] = {0, 1, 2, 3};
+ int* tmp = dx_offset;
+ const uint8* src_tmp = src_argb;
+  int64 dst_width64 = (int64) dst_width;  // Work around iOS 64-bit warning.
+ int64 x64 = (int64) x;
+ int64 dx64 = (int64) dx;
+ asm volatile (
+ "dup v0.4s, %w3 \n" // x
+ "dup v1.4s, %w4 \n" // dx
+ "ld1 {v2.4s}, [%5] \n" // 0 1 2 3
+ "shl v6.4s, v1.4s, #2 \n" // 4 * dx
+ "mul v1.4s, v1.4s, v2.4s \n"
+ "movi v3.16b, #0x7f \n" // 0x7F
+ "movi v4.8h, #0x7f \n" // 0x7F
+ // x , x + 1 * dx, x + 2 * dx, x + 3 * dx
+ "add v5.4s, v1.4s, v0.4s \n"
+ "1: \n"
+ // d0, d1: a
+ // d2, d3: b
+ LOAD2_DATA32_LANE(v0, v1, 0)
+ LOAD2_DATA32_LANE(v0, v1, 1)
+ LOAD2_DATA32_LANE(v0, v1, 2)
+ LOAD2_DATA32_LANE(v0, v1, 3)
+ "shrn v2.4h, v5.4s, #9 \n"
+ "and v2.8b, v2.8b, v4.8b \n"
+ "dup v16.8b, v2.b[0] \n"
+ "dup v17.8b, v2.b[2] \n"
+ "dup v18.8b, v2.b[4] \n"
+ "dup v19.8b, v2.b[6] \n"
+ "ext v2.8b, v16.8b, v17.8b, #4 \n"
+ "ext v17.8b, v18.8b, v19.8b, #4 \n"
+ "ins v2.d[1], v17.d[0] \n" // f
+ "eor v7.16b, v2.16b, v3.16b \n" // 0x7f ^ f
+ "umull v16.8h, v0.8b, v7.8b \n"
+ "umull2 v17.8h, v0.16b, v7.16b \n"
+ "umull v18.8h, v1.8b, v2.8b \n"
+ "umull2 v19.8h, v1.16b, v2.16b \n"
+ "add v16.8h, v16.8h, v18.8h \n"
+ "add v17.8h, v17.8h, v19.8h \n"
+ "shrn v0.8b, v16.8h, #7 \n"
+ "shrn2 v0.16b, v17.8h, #7 \n"
+
+ MEMACCESS(0)
+ "st1 {v0.4s}, [%0], #16 \n" // store pixels
+ "add v5.4s, v5.4s, v6.4s \n"
+ "subs %w2, %w2, #4 \n" // 4 processed per loop
+ "b.gt 1b \n"
+ : "+r"(dst_argb), // %0
+ "+r"(src_argb), // %1
+ "+r"(dst_width64), // %2
+ "+r"(x64), // %3
+ "+r"(dx64), // %4
+ "+r"(tmp), // %5
+ "+r"(src_tmp) // %6
+ :
+ : "memory", "cc", "v0", "v1", "v2", "v3", "v4", "v5",
+ "v6", "v7", "v16", "v17", "v18", "v19"
+ );
+}
+
+#undef LOAD2_DATA32_LANE
+
+#endif // !defined(LIBYUV_DISABLE_NEON) && defined(__aarch64__)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/scale_win.cc b/media/libaom/src/third_party/libyuv/source/scale_win.cc
new file mode 100644
index 000000000..c3896ebad
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/scale_win.cc
@@ -0,0 +1,1354 @@
+/*
+ * Copyright 2013 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#include "libyuv/row.h"
+#include "libyuv/scale_row.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+// This module is for Visual C x86.
+#if !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86) && \
+ defined(_MSC_VER) && !defined(__clang__)
+
+// Offsets for source bytes 0 to 9
+static uvec8 kShuf0 =
+ { 0, 1, 3, 4, 5, 7, 8, 9, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+// Offsets for source bytes 11 to 20 with 8 subtracted = 3 to 12.
+static uvec8 kShuf1 =
+ { 3, 4, 5, 7, 8, 9, 11, 12, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+// Offsets for source bytes 21 to 31 with 16 subtracted = 5 to 15.
+static uvec8 kShuf2 =
+ { 5, 7, 8, 9, 11, 12, 13, 15, 128, 128, 128, 128, 128, 128, 128, 128 };
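+// Note: a pshufb index with bit 7 set (128 above) zeroes the corresponding
+// destination byte instead of selecting a source byte.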
+
+// Offsets for source bytes 0 to 10
+static uvec8 kShuf01 =
+ { 0, 1, 1, 2, 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10 };
+
+// Offsets for source bytes 10 to 21 with 8 subtracted = 3 to 13.
+static uvec8 kShuf11 =
+ { 2, 3, 4, 5, 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13 };
+
+// Offsets for source bytes 21 to 31 with 16 subtracted = 5 to 15.
+static uvec8 kShuf21 =
+ { 5, 6, 6, 7, 8, 9, 9, 10, 10, 11, 12, 13, 13, 14, 14, 15 };
+
+// Coefficients for source bytes 0 to 10
+static uvec8 kMadd01 =
+ { 3, 1, 2, 2, 1, 3, 3, 1, 2, 2, 1, 3, 3, 1, 2, 2 };
+
+// Coefficients for source bytes 10 to 21
+static uvec8 kMadd11 =
+ { 1, 3, 3, 1, 2, 2, 1, 3, 3, 1, 2, 2, 1, 3, 3, 1 };
+
+// Coefficients for source bytes 21 to 31
+static uvec8 kMadd21 =
+ { 2, 2, 1, 3, 3, 1, 2, 2, 1, 3, 3, 1, 2, 2, 1, 3 };
+
+// Rounding constant added before the final >> 2 in the 3/4 box filters.
+static vec16 kRound34 =
+ { 2, 2, 2, 2, 2, 2, 2, 2 };
+
+static uvec8 kShuf38a =
+ { 0, 3, 6, 8, 11, 14, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+static uvec8 kShuf38b =
+ { 128, 128, 128, 128, 128, 128, 0, 3, 6, 8, 11, 14, 128, 128, 128, 128 };
+
+// Arrange words 0,3,6 into 0,1,2
+static uvec8 kShufAc =
+ { 0, 1, 6, 7, 12, 13, 128, 128, 128, 128, 128, 128, 128, 128, 128, 128 };
+
+// Arrange words 0,3,6 into 3,4,5
+static uvec8 kShufAc3 =
+ { 128, 128, 128, 128, 128, 128, 0, 1, 6, 7, 12, 13, 128, 128, 128, 128 };
+
+// Scaling values for boxes of 3x3 and 2x3
+static uvec16 kScaleAc33 =
+ { 65536 / 9, 65536 / 9, 65536 / 6, 65536 / 9, 65536 / 9, 65536 / 6, 0, 0 };
+
+// Arrange first value for pixels 0,1,2,3,4,5
+static uvec8 kShufAb0 =
+ { 0, 128, 3, 128, 6, 128, 8, 128, 11, 128, 14, 128, 128, 128, 128, 128 };
+
+// Arrange second value for pixels 0,1,2,3,4,5
+static uvec8 kShufAb1 =
+ { 1, 128, 4, 128, 7, 128, 9, 128, 12, 128, 15, 128, 128, 128, 128, 128 };
+
+// Arrange third value for pixels 0,1,2,3,4,5
+static uvec8 kShufAb2 =
+ { 2, 128, 5, 128, 128, 128, 10, 128, 13, 128, 128, 128, 128, 128, 128, 128 };
+
+// Scaling values for boxes of 3x2 and 2x2
+static uvec16 kScaleAb2 =
+ { 65536 / 3, 65536 / 3, 65536 / 2, 65536 / 3, 65536 / 3, 65536 / 2, 0, 0 };
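+// pmulhuw keeps the high 16 bits of a 16x16-bit multiply, so multiplying a
+// box sum by 65536 / n approximates division by n, e.g. (illustrative):
+//   uint16 avg = (uint16)(((uint32)sum * (65536 / 9)) >> 16);  // ~ sum / 9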
+
+// Reads 32 pixels, throws half away and writes 16 pixels.
+__declspec(naked)
+void ScaleRowDown2_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+ // src_stride ignored
+ mov edx, [esp + 12] // dst_ptr
+ mov ecx, [esp + 16] // dst_width
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ psrlw xmm0, 8 // isolate odd pixels.
+ psrlw xmm1, 8
+ packuswb xmm0, xmm1
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg wloop
+
+ ret
+ }
+}
+
+// Blends 32x1 rectangle to 16x1.
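+// Roughly: dst[i] = (src[2 * i] + src[2 * i + 1] + 1) >> 1 (pavgw rounds up).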
+__declspec(naked)
+void ScaleRowDown2Linear_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+                               // src_stride ignored
+ mov edx, [esp + 12] // dst_ptr
+ mov ecx, [esp + 16] // dst_width
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
+ psrlw xmm5, 8
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+
+ movdqa xmm2, xmm0 // average columns (32 to 16 pixels)
+ psrlw xmm0, 8
+ movdqa xmm3, xmm1
+ psrlw xmm1, 8
+ pand xmm2, xmm5
+ pand xmm3, xmm5
+ pavgw xmm0, xmm2
+ pavgw xmm1, xmm3
+ packuswb xmm0, xmm1
+
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg wloop
+
+ ret
+ }
+}
+
+// Blends 32x2 rectangle to 16x1.
+__declspec(naked)
+void ScaleRowDown2Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_ptr
+ mov esi, [esp + 4 + 8] // src_stride
+ mov edx, [esp + 4 + 12] // dst_ptr
+ mov ecx, [esp + 4 + 16] // dst_width
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff00ff
+ psrlw xmm5, 8
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + esi]
+ movdqu xmm3, [eax + esi + 16]
+ lea eax, [eax + 32]
+ pavgb xmm0, xmm2 // average rows
+ pavgb xmm1, xmm3
+
+ movdqa xmm2, xmm0 // average columns (32 to 16 pixels)
+ psrlw xmm0, 8
+ movdqa xmm3, xmm1
+ psrlw xmm1, 8
+ pand xmm2, xmm5
+ pand xmm3, xmm5
+ pavgw xmm0, xmm2
+ pavgw xmm1, xmm3
+ packuswb xmm0, xmm1
+
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg wloop
+
+ pop esi
+ ret
+ }
+}
+
+#ifdef HAS_SCALEROWDOWN2_AVX2
+// Reads 64 pixels, throws half away and writes 32 pixels.
+__declspec(naked)
+void ScaleRowDown2_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+ // src_stride ignored
+ mov edx, [esp + 12] // dst_ptr
+ mov ecx, [esp + 16] // dst_width
+
+ wloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vpsrlw ymm0, ymm0, 8 // isolate odd pixels.
+ vpsrlw ymm1, ymm1, 8
+ vpackuswb ymm0, ymm0, ymm1
+ vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg wloop
+
+ vzeroupper
+ ret
+ }
+}
+
+// Blends 64x1 rectangle to 32x1.
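+// vpmaddubsw against a vector of 1s sums each adjacent byte pair, then
+// vpavgw against zero turns the sum into the rounded average (sum + 1) >> 1.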
+__declspec(naked)
+void ScaleRowDown2Linear_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+                               // src_stride ignored
+ mov edx, [esp + 12] // dst_ptr
+ mov ecx, [esp + 16] // dst_width
+
+ vpcmpeqb ymm4, ymm4, ymm4 // '1' constant, 8b
+ vpsrlw ymm4, ymm4, 15
+ vpackuswb ymm4, ymm4, ymm4
+ vpxor ymm5, ymm5, ymm5 // constant 0
+
+ wloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+
+ vpmaddubsw ymm0, ymm0, ymm4 // average horizontally
+ vpmaddubsw ymm1, ymm1, ymm4
+ vpavgw ymm0, ymm0, ymm5 // (x + 1) / 2
+ vpavgw ymm1, ymm1, ymm5
+ vpackuswb ymm0, ymm0, ymm1
+ vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb
+
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg wloop
+
+ vzeroupper
+ ret
+ }
+}
+
+// Blends 64x2 rectangle to 32x1.
+__declspec(naked)
+void ScaleRowDown2Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_ptr
+ mov esi, [esp + 4 + 8] // src_stride
+ mov edx, [esp + 4 + 12] // dst_ptr
+ mov ecx, [esp + 4 + 16] // dst_width
+
+ vpcmpeqb ymm4, ymm4, ymm4 // '1' constant, 8b
+ vpsrlw ymm4, ymm4, 15
+ vpackuswb ymm4, ymm4, ymm4
+ vpxor ymm5, ymm5, ymm5 // constant 0
+
+ wloop:
+ vmovdqu ymm0, [eax] // average rows
+ vmovdqu ymm1, [eax + 32]
+ vpavgb ymm0, ymm0, [eax + esi]
+ vpavgb ymm1, ymm1, [eax + esi + 32]
+ lea eax, [eax + 64]
+
+ vpmaddubsw ymm0, ymm0, ymm4 // average horizontally
+ vpmaddubsw ymm1, ymm1, ymm4
+ vpavgw ymm0, ymm0, ymm5 // (x + 1) / 2
+ vpavgw ymm1, ymm1, ymm5
+ vpackuswb ymm0, ymm0, ymm1
+ vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb
+
+ vmovdqu [edx], ymm0
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg wloop
+
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_SCALEROWDOWN2_AVX2
+
+// Point samples 32 pixels to 8 pixels.
+__declspec(naked)
+void ScaleRowDown4_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+ // src_stride ignored
+ mov edx, [esp + 12] // dst_ptr
+ mov ecx, [esp + 16] // dst_width
+ pcmpeqb xmm5, xmm5 // generate mask 0x00ff0000
+ psrld xmm5, 24
+ pslld xmm5, 16
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ pand xmm0, xmm5
+ pand xmm1, xmm5
+ packuswb xmm0, xmm1
+ psrlw xmm0, 8
+ packuswb xmm0, xmm0
+ movq qword ptr [edx], xmm0
+ lea edx, [edx + 8]
+ sub ecx, 8
+ jg wloop
+
+ ret
+ }
+}
+
+// Blends 32x4 rectangle to 8x1.
+__declspec(naked)
+void ScaleRowDown4Box_SSE2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_ptr
+ mov esi, [esp + 8 + 8] // src_stride
+ mov edx, [esp + 8 + 12] // dst_ptr
+ mov ecx, [esp + 8 + 16] // dst_width
+ lea edi, [esi + esi * 2] // src_stride * 3
+ pcmpeqb xmm7, xmm7 // generate mask 0x00ff00ff
+ psrlw xmm7, 8
+
+ wloop:
+ movdqu xmm0, [eax] // average rows
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + esi]
+ movdqu xmm3, [eax + esi + 16]
+ pavgb xmm0, xmm2
+ pavgb xmm1, xmm3
+ movdqu xmm2, [eax + esi * 2]
+ movdqu xmm3, [eax + esi * 2 + 16]
+ movdqu xmm4, [eax + edi]
+ movdqu xmm5, [eax + edi + 16]
+ lea eax, [eax + 32]
+ pavgb xmm2, xmm4
+ pavgb xmm3, xmm5
+ pavgb xmm0, xmm2
+ pavgb xmm1, xmm3
+
+ movdqa xmm2, xmm0 // average columns (32 to 16 pixels)
+ psrlw xmm0, 8
+ movdqa xmm3, xmm1
+ psrlw xmm1, 8
+ pand xmm2, xmm7
+ pand xmm3, xmm7
+ pavgw xmm0, xmm2
+ pavgw xmm1, xmm3
+ packuswb xmm0, xmm1
+
+ movdqa xmm2, xmm0 // average columns (16 to 8 pixels)
+ psrlw xmm0, 8
+ pand xmm2, xmm7
+ pavgw xmm0, xmm2
+ packuswb xmm0, xmm0
+
+ movq qword ptr [edx], xmm0
+ lea edx, [edx + 8]
+ sub ecx, 8
+ jg wloop
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+#ifdef HAS_SCALEROWDOWN4_AVX2
+// Point samples 64 pixels to 16 pixels.
+__declspec(naked)
+void ScaleRowDown4_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+ // src_stride ignored
+ mov edx, [esp + 12] // dst_ptr
+ mov ecx, [esp + 16] // dst_width
+ vpcmpeqb ymm5, ymm5, ymm5 // generate mask 0x00ff0000
+ vpsrld ymm5, ymm5, 24
+ vpslld ymm5, ymm5, 16
+
+ wloop:
+ vmovdqu ymm0, [eax]
+ vmovdqu ymm1, [eax + 32]
+ lea eax, [eax + 64]
+ vpand ymm0, ymm0, ymm5
+ vpand ymm1, ymm1, ymm5
+ vpackuswb ymm0, ymm0, ymm1
+ vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb
+ vpsrlw ymm0, ymm0, 8
+ vpackuswb ymm0, ymm0, ymm0
+ vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb
+ vmovdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg wloop
+
+ vzeroupper
+ ret
+ }
+}
+
+// Blends 64x4 rectangle to 16x1.
+__declspec(naked)
+void ScaleRowDown4Box_AVX2(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ push esi
+ push edi
+ mov eax, [esp + 8 + 4] // src_ptr
+ mov esi, [esp + 8 + 8] // src_stride
+ mov edx, [esp + 8 + 12] // dst_ptr
+ mov ecx, [esp + 8 + 16] // dst_width
+ lea edi, [esi + esi * 2] // src_stride * 3
+ vpcmpeqb ymm7, ymm7, ymm7 // generate mask 0x00ff00ff
+ vpsrlw ymm7, ymm7, 8
+
+ wloop:
+ vmovdqu ymm0, [eax] // average rows
+ vmovdqu ymm1, [eax + 32]
+ vpavgb ymm0, ymm0, [eax + esi]
+ vpavgb ymm1, ymm1, [eax + esi + 32]
+ vmovdqu ymm2, [eax + esi * 2]
+ vmovdqu ymm3, [eax + esi * 2 + 32]
+ vpavgb ymm2, ymm2, [eax + edi]
+ vpavgb ymm3, ymm3, [eax + edi + 32]
+ lea eax, [eax + 64]
+ vpavgb ymm0, ymm0, ymm2
+ vpavgb ymm1, ymm1, ymm3
+
+ vpand ymm2, ymm0, ymm7 // average columns (64 to 32 pixels)
+ vpand ymm3, ymm1, ymm7
+ vpsrlw ymm0, ymm0, 8
+ vpsrlw ymm1, ymm1, 8
+ vpavgw ymm0, ymm0, ymm2
+ vpavgw ymm1, ymm1, ymm3
+ vpackuswb ymm0, ymm0, ymm1
+ vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb
+
+ vpand ymm2, ymm0, ymm7 // average columns (32 to 16 pixels)
+ vpsrlw ymm0, ymm0, 8
+ vpavgw ymm0, ymm0, ymm2
+ vpackuswb ymm0, ymm0, ymm0
+ vpermq ymm0, ymm0, 0xd8 // unmutate vpackuswb
+
+ vmovdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 16
+ jg wloop
+
+ pop edi
+ pop esi
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_SCALEROWDOWN4_AVX2
+
+// Point samples 32 pixels to 24 pixels.
+// Produces three 8 byte values. For each 8 bytes, 16 bytes are read.
+// Then shuffled to do the scaling.
+
+__declspec(naked)
+void ScaleRowDown34_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+ // src_stride ignored
+ mov edx, [esp + 12] // dst_ptr
+ mov ecx, [esp + 16] // dst_width
+ movdqa xmm3, kShuf0
+ movdqa xmm4, kShuf1
+ movdqa xmm5, kShuf2
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ movdqa xmm2, xmm1
+ palignr xmm1, xmm0, 8
+ pshufb xmm0, xmm3
+ pshufb xmm1, xmm4
+ pshufb xmm2, xmm5
+ movq qword ptr [edx], xmm0
+ movq qword ptr [edx + 8], xmm1
+ movq qword ptr [edx + 16], xmm2
+ lea edx, [edx + 24]
+ sub ecx, 24
+ jg wloop
+
+ ret
+ }
+}
+
+// Blends 32x2 rectangle to 24x1
+// Produces three 8 byte values. For each 8 bytes, 16 bytes are read.
+// Then shuffled to do the scaling.
+
+// Register usage:
+// xmm0 src_row 0
+// xmm1 src_row 1
+// xmm2 shuf 0
+// xmm3 shuf 1
+// xmm4 shuf 2
+// xmm5 madd 0
+// xmm6 madd 1
+// xmm7 kRound34
+
+// Note that movdqa+palign may be better than movdqu.
+__declspec(naked)
+void ScaleRowDown34_1_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_ptr
+ mov esi, [esp + 4 + 8] // src_stride
+ mov edx, [esp + 4 + 12] // dst_ptr
+ mov ecx, [esp + 4 + 16] // dst_width
+ movdqa xmm2, kShuf01
+ movdqa xmm3, kShuf11
+ movdqa xmm4, kShuf21
+ movdqa xmm5, kMadd01
+ movdqa xmm6, kMadd11
+ movdqa xmm7, kRound34
+
+ wloop:
+ movdqu xmm0, [eax] // pixels 0..7
+ movdqu xmm1, [eax + esi]
+ pavgb xmm0, xmm1
+ pshufb xmm0, xmm2
+ pmaddubsw xmm0, xmm5
+ paddsw xmm0, xmm7
+ psrlw xmm0, 2
+ packuswb xmm0, xmm0
+ movq qword ptr [edx], xmm0
+ movdqu xmm0, [eax + 8] // pixels 8..15
+ movdqu xmm1, [eax + esi + 8]
+ pavgb xmm0, xmm1
+ pshufb xmm0, xmm3
+ pmaddubsw xmm0, xmm6
+ paddsw xmm0, xmm7
+ psrlw xmm0, 2
+ packuswb xmm0, xmm0
+ movq qword ptr [edx + 8], xmm0
+ movdqu xmm0, [eax + 16] // pixels 16..23
+ movdqu xmm1, [eax + esi + 16]
+ lea eax, [eax + 32]
+ pavgb xmm0, xmm1
+ pshufb xmm0, xmm4
+ movdqa xmm1, kMadd21
+ pmaddubsw xmm0, xmm1
+ paddsw xmm0, xmm7
+ psrlw xmm0, 2
+ packuswb xmm0, xmm0
+ movq qword ptr [edx + 16], xmm0
+ lea edx, [edx + 24]
+ sub ecx, 24
+ jg wloop
+
+ pop esi
+ ret
+ }
+}
+
+// Note that movdqa+palign may be better than movdqu.
+__declspec(naked)
+void ScaleRowDown34_0_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_ptr
+ mov esi, [esp + 4 + 8] // src_stride
+ mov edx, [esp + 4 + 12] // dst_ptr
+ mov ecx, [esp + 4 + 16] // dst_width
+ movdqa xmm2, kShuf01
+ movdqa xmm3, kShuf11
+ movdqa xmm4, kShuf21
+ movdqa xmm5, kMadd01
+ movdqa xmm6, kMadd11
+ movdqa xmm7, kRound34
+
+ wloop:
+ movdqu xmm0, [eax] // pixels 0..7
+ movdqu xmm1, [eax + esi]
+ pavgb xmm1, xmm0
+ pavgb xmm0, xmm1
+ pshufb xmm0, xmm2
+ pmaddubsw xmm0, xmm5
+ paddsw xmm0, xmm7
+ psrlw xmm0, 2
+ packuswb xmm0, xmm0
+ movq qword ptr [edx], xmm0
+ movdqu xmm0, [eax + 8] // pixels 8..15
+ movdqu xmm1, [eax + esi + 8]
+ pavgb xmm1, xmm0
+ pavgb xmm0, xmm1
+ pshufb xmm0, xmm3
+ pmaddubsw xmm0, xmm6
+ paddsw xmm0, xmm7
+ psrlw xmm0, 2
+ packuswb xmm0, xmm0
+ movq qword ptr [edx + 8], xmm0
+ movdqu xmm0, [eax + 16] // pixels 16..23
+ movdqu xmm1, [eax + esi + 16]
+ lea eax, [eax + 32]
+ pavgb xmm1, xmm0
+ pavgb xmm0, xmm1
+ pshufb xmm0, xmm4
+ movdqa xmm1, kMadd21
+ pmaddubsw xmm0, xmm1
+ paddsw xmm0, xmm7
+ psrlw xmm0, 2
+ packuswb xmm0, xmm0
+ movq qword ptr [edx + 16], xmm0
+ lea edx, [edx+24]
+ sub ecx, 24
+ jg wloop
+
+ pop esi
+ ret
+ }
+}
+
+// 3/8 point sampler
+
+// Scale 32 pixels to 12
+__declspec(naked)
+void ScaleRowDown38_SSSE3(const uint8* src_ptr, ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+ // src_stride ignored
+ mov edx, [esp + 12] // dst_ptr
+ mov ecx, [esp + 16] // dst_width
+ movdqa xmm4, kShuf38a
+ movdqa xmm5, kShuf38b
+
+ xloop:
+ movdqu xmm0, [eax] // 16 pixels -> 0,1,2,3,4,5
+ movdqu xmm1, [eax + 16] // 16 pixels -> 6,7,8,9,10,11
+ lea eax, [eax + 32]
+ pshufb xmm0, xmm4
+ pshufb xmm1, xmm5
+ paddusb xmm0, xmm1
+
+ movq qword ptr [edx], xmm0 // write 12 pixels
+ movhlps xmm1, xmm0
+ movd [edx + 8], xmm1
+ lea edx, [edx + 12]
+ sub ecx, 12
+ jg xloop
+
+ ret
+ }
+}
+
+// Scale 16x3 pixels to 6x1 with interpolation
+__declspec(naked)
+void ScaleRowDown38_3_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_ptr
+ mov esi, [esp + 4 + 8] // src_stride
+ mov edx, [esp + 4 + 12] // dst_ptr
+ mov ecx, [esp + 4 + 16] // dst_width
+ movdqa xmm2, kShufAc
+ movdqa xmm3, kShufAc3
+ movdqa xmm4, kScaleAc33
+ pxor xmm5, xmm5
+
+ xloop:
+ movdqu xmm0, [eax] // sum up 3 rows into xmm0/1
+ movdqu xmm6, [eax + esi]
+ movhlps xmm1, xmm0
+ movhlps xmm7, xmm6
+ punpcklbw xmm0, xmm5
+ punpcklbw xmm1, xmm5
+ punpcklbw xmm6, xmm5
+ punpcklbw xmm7, xmm5
+ paddusw xmm0, xmm6
+ paddusw xmm1, xmm7
+ movdqu xmm6, [eax + esi * 2]
+ lea eax, [eax + 16]
+ movhlps xmm7, xmm6
+ punpcklbw xmm6, xmm5
+ punpcklbw xmm7, xmm5
+ paddusw xmm0, xmm6
+ paddusw xmm1, xmm7
+
+ movdqa xmm6, xmm0 // 8 pixels -> 0,1,2 of xmm6
+ psrldq xmm0, 2
+ paddusw xmm6, xmm0
+ psrldq xmm0, 2
+ paddusw xmm6, xmm0
+ pshufb xmm6, xmm2
+
+ movdqa xmm7, xmm1 // 8 pixels -> 3,4,5 of xmm6
+ psrldq xmm1, 2
+ paddusw xmm7, xmm1
+ psrldq xmm1, 2
+ paddusw xmm7, xmm1
+ pshufb xmm7, xmm3
+ paddusw xmm6, xmm7
+
+ pmulhuw xmm6, xmm4 // divide by 9,9,6, 9,9,6
+ packuswb xmm6, xmm6
+
+ movd [edx], xmm6 // write 6 pixels
+ psrlq xmm6, 16
+ movd [edx + 2], xmm6
+ lea edx, [edx + 6]
+ sub ecx, 6
+ jg xloop
+
+ pop esi
+ ret
+ }
+}
+
+// Scale 16x2 pixels to 6x1 with interpolation
+__declspec(naked)
+void ScaleRowDown38_2_Box_SSSE3(const uint8* src_ptr,
+ ptrdiff_t src_stride,
+ uint8* dst_ptr, int dst_width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_ptr
+ mov esi, [esp + 4 + 8] // src_stride
+ mov edx, [esp + 4 + 12] // dst_ptr
+ mov ecx, [esp + 4 + 16] // dst_width
+ movdqa xmm2, kShufAb0
+ movdqa xmm3, kShufAb1
+ movdqa xmm4, kShufAb2
+ movdqa xmm5, kScaleAb2
+
+ xloop:
+ movdqu xmm0, [eax] // average 2 rows into xmm0
+ movdqu xmm1, [eax + esi]
+ lea eax, [eax + 16]
+ pavgb xmm0, xmm1
+
+ movdqa xmm1, xmm0 // 16 pixels -> 0,1,2,3,4,5 of xmm1
+ pshufb xmm1, xmm2
+ movdqa xmm6, xmm0
+ pshufb xmm6, xmm3
+ paddusw xmm1, xmm6
+ pshufb xmm0, xmm4
+ paddusw xmm1, xmm0
+
+ pmulhuw xmm1, xmm5 // divide by 3,3,2, 3,3,2
+ packuswb xmm1, xmm1
+
+ movd [edx], xmm1 // write 6 pixels
+ psrlq xmm1, 16
+ movd [edx + 2], xmm1
+ lea edx, [edx + 6]
+ sub ecx, 6
+ jg xloop
+
+ pop esi
+ ret
+ }
+}
+
+// Reads 16 bytes and accumulates to 16 shorts at a time.
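+// Roughly: dst[i] = min(dst[i] + src[i], 65535) for each of 16 pixels.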
+__declspec(naked)
+void ScaleAddRow_SSE2(const uint8* src_ptr, uint16* dst_ptr, int src_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+ mov edx, [esp + 8] // dst_ptr
+ mov ecx, [esp + 12] // src_width
+ pxor xmm5, xmm5
+
+ // sum rows
+ xloop:
+ movdqu xmm3, [eax] // read 16 bytes
+ lea eax, [eax + 16]
+ movdqu xmm0, [edx] // read 16 words from destination
+ movdqu xmm1, [edx + 16]
+ movdqa xmm2, xmm3
+ punpcklbw xmm2, xmm5
+ punpckhbw xmm3, xmm5
+ paddusw xmm0, xmm2 // sum 16 words
+ paddusw xmm1, xmm3
+ movdqu [edx], xmm0 // write 16 words to destination
+ movdqu [edx + 16], xmm1
+ lea edx, [edx + 32]
+ sub ecx, 16
+ jg xloop
+ ret
+ }
+}
+
+#ifdef HAS_SCALEADDROW_AVX2
+// Reads 32 bytes and accumulates to 32 shorts at a time.
+__declspec(naked)
+void ScaleAddRow_AVX2(const uint8* src_ptr, uint16* dst_ptr, int src_width) {
+ __asm {
+ mov eax, [esp + 4] // src_ptr
+ mov edx, [esp + 8] // dst_ptr
+ mov ecx, [esp + 12] // src_width
+ vpxor ymm5, ymm5, ymm5
+
+ // sum rows
+ xloop:
+ vmovdqu ymm3, [eax] // read 32 bytes
+ lea eax, [eax + 32]
+ vpermq ymm3, ymm3, 0xd8 // unmutate for vpunpck
+ vpunpcklbw ymm2, ymm3, ymm5
+ vpunpckhbw ymm3, ymm3, ymm5
+ vpaddusw ymm0, ymm2, [edx] // sum 16 words
+ vpaddusw ymm1, ymm3, [edx + 32]
+ vmovdqu [edx], ymm0 // write 32 words to destination
+ vmovdqu [edx + 32], ymm1
+ lea edx, [edx + 64]
+ sub ecx, 32
+ jg xloop
+
+ vzeroupper
+ ret
+ }
+}
+#endif // HAS_SCALEADDROW_AVX2
+
+// Bilinear column filtering. SSSE3 version.
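+// With the 7-bit fraction f = (x >> 9) & 0x7f, each output is roughly
+//   dst[i] = (src[xi] * (127 - f) + src[xi + 1] * f) >> 7
+// where xi = x >> 16; the pxor with 0x007f below forms 127 - f.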
+__declspec(naked)
+void ScaleFilterCols_SSSE3(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ __asm {
+ push ebx
+ push esi
+ push edi
+ mov edi, [esp + 12 + 4] // dst_ptr
+ mov esi, [esp + 12 + 8] // src_ptr
+ mov ecx, [esp + 12 + 12] // dst_width
+ movd xmm2, [esp + 12 + 16] // x
+ movd xmm3, [esp + 12 + 20] // dx
+ mov eax, 0x04040000 // shuffle to line up fractions with pixel.
+ movd xmm5, eax
+ pcmpeqb xmm6, xmm6 // generate 0x007f for inverting fraction.
+ psrlw xmm6, 9
+ pextrw eax, xmm2, 1 // get x0 integer. preroll
+ sub ecx, 2
+ jl xloop29
+
+ movdqa xmm0, xmm2 // x1 = x0 + dx
+ paddd xmm0, xmm3
+ punpckldq xmm2, xmm0 // x0 x1
+ punpckldq xmm3, xmm3 // dx dx
+ paddd xmm3, xmm3 // dx * 2, dx * 2
+ pextrw edx, xmm2, 3 // get x1 integer. preroll
+
+ // 2 Pixel loop.
+ xloop2:
+ movdqa xmm1, xmm2 // x0, x1 fractions.
+ paddd xmm2, xmm3 // x += dx
+ movzx ebx, word ptr [esi + eax] // 2 source x0 pixels
+ movd xmm0, ebx
+ psrlw xmm1, 9 // 7 bit fractions.
+ movzx ebx, word ptr [esi + edx] // 2 source x1 pixels
+ movd xmm4, ebx
+ pshufb xmm1, xmm5 // 0011
+ punpcklwd xmm0, xmm4
+ pxor xmm1, xmm6 // 0..7f and 7f..0
+ pmaddubsw xmm0, xmm1 // 16 bit, 2 pixels.
+ pextrw eax, xmm2, 1 // get x0 integer. next iteration.
+ pextrw edx, xmm2, 3 // get x1 integer. next iteration.
+ psrlw xmm0, 7 // 8.7 fixed point to low 8 bits.
+ packuswb xmm0, xmm0 // 8 bits, 2 pixels.
+ movd ebx, xmm0
+ mov [edi], bx
+ lea edi, [edi + 2]
+ sub ecx, 2 // 2 pixels
+ jge xloop2
+
+ xloop29:
+
+ add ecx, 2 - 1
+ jl xloop99
+
+ // 1 pixel remainder
+ movzx ebx, word ptr [esi + eax] // 2 source x0 pixels
+ movd xmm0, ebx
+ psrlw xmm2, 9 // 7 bit fractions.
+ pshufb xmm2, xmm5 // 0011
+ pxor xmm2, xmm6 // 0..7f and 7f..0
+ pmaddubsw xmm0, xmm2 // 16 bit
+ psrlw xmm0, 7 // 8.7 fixed point to low 8 bits.
+ packuswb xmm0, xmm0 // 8 bits
+ movd ebx, xmm0
+ mov [edi], bl
+
+ xloop99:
+
+ pop edi
+ pop esi
+ pop ebx
+ ret
+ }
+}
+
+// Reads 16 pixels, duplicates them and writes 32 pixels.
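+// Nearest-neighbor 2x upscale: dst[2 * i] = dst[2 * i + 1] = src[i].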
+__declspec(naked)
+void ScaleColsUp2_SSE2(uint8* dst_ptr, const uint8* src_ptr,
+ int dst_width, int x, int dx) {
+ __asm {
+ mov edx, [esp + 4] // dst_ptr
+ mov eax, [esp + 8] // src_ptr
+ mov ecx, [esp + 12] // dst_width
+
+ wloop:
+ movdqu xmm0, [eax]
+ lea eax, [eax + 16]
+ movdqa xmm1, xmm0
+ punpcklbw xmm0, xmm0
+ punpckhbw xmm1, xmm1
+ movdqu [edx], xmm0
+ movdqu [edx + 16], xmm1
+ lea edx, [edx + 32]
+ sub ecx, 32
+ jg wloop
+
+ ret
+ }
+}
+
+// Reads 8 pixels, throws half away and writes 4 odd pixels (1, 3, 5, 7)
+__declspec(naked)
+void ScaleARGBRowDown2_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ // src_stride ignored
+ mov edx, [esp + 12] // dst_argb
+ mov ecx, [esp + 16] // dst_width
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ shufps xmm0, xmm1, 0xdd
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg wloop
+
+ ret
+ }
+}
+
+// Blends 8x1 rectangle to 4x1.
+__declspec(naked)
+void ScaleARGBRowDown2Linear_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ __asm {
+ mov eax, [esp + 4] // src_argb
+ // src_stride ignored
+ mov edx, [esp + 12] // dst_argb
+ mov ecx, [esp + 16] // dst_width
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ lea eax, [eax + 32]
+ movdqa xmm2, xmm0
+ shufps xmm0, xmm1, 0x88 // even pixels
+ shufps xmm2, xmm1, 0xdd // odd pixels
+ pavgb xmm0, xmm2
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg wloop
+
+ ret
+ }
+}
+
+// Blends 8x2 rectangle to 4x1.
+__declspec(naked)
+void ScaleARGBRowDown2Box_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ uint8* dst_argb, int dst_width) {
+ __asm {
+ push esi
+ mov eax, [esp + 4 + 4] // src_argb
+ mov esi, [esp + 4 + 8] // src_stride
+ mov edx, [esp + 4 + 12] // dst_argb
+ mov ecx, [esp + 4 + 16] // dst_width
+
+ wloop:
+ movdqu xmm0, [eax]
+ movdqu xmm1, [eax + 16]
+ movdqu xmm2, [eax + esi]
+ movdqu xmm3, [eax + esi + 16]
+ lea eax, [eax + 32]
+ pavgb xmm0, xmm2 // average rows
+ pavgb xmm1, xmm3
+ movdqa xmm2, xmm0 // average columns (8 to 4 pixels)
+ shufps xmm0, xmm1, 0x88 // even pixels
+ shufps xmm2, xmm1, 0xdd // odd pixels
+ pavgb xmm0, xmm2
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg wloop
+
+ pop esi
+ ret
+ }
+}
+
+// Reads 4 pixels at a time.
+__declspec(naked)
+void ScaleARGBRowDownEven_SSE2(const uint8* src_argb, ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width) {
+ __asm {
+ push ebx
+ push edi
+ mov eax, [esp + 8 + 4] // src_argb
+ // src_stride ignored
+ mov ebx, [esp + 8 + 12] // src_stepx
+ mov edx, [esp + 8 + 16] // dst_argb
+ mov ecx, [esp + 8 + 20] // dst_width
+ lea ebx, [ebx * 4]
+ lea edi, [ebx + ebx * 2]
+
+ wloop:
+ movd xmm0, [eax]
+ movd xmm1, [eax + ebx]
+ punpckldq xmm0, xmm1
+ movd xmm2, [eax + ebx * 2]
+ movd xmm3, [eax + edi]
+ lea eax, [eax + ebx * 4]
+ punpckldq xmm2, xmm3
+ punpcklqdq xmm0, xmm2
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg wloop
+
+ pop edi
+ pop ebx
+ ret
+ }
+}
+
+// Blends four 2x2 to 4x1.
+__declspec(naked)
+void ScaleARGBRowDownEvenBox_SSE2(const uint8* src_argb,
+ ptrdiff_t src_stride,
+ int src_stepx,
+ uint8* dst_argb, int dst_width) {
+ __asm {
+ push ebx
+ push esi
+ push edi
+ mov eax, [esp + 12 + 4] // src_argb
+ mov esi, [esp + 12 + 8] // src_stride
+ mov ebx, [esp + 12 + 12] // src_stepx
+ mov edx, [esp + 12 + 16] // dst_argb
+ mov ecx, [esp + 12 + 20] // dst_width
+ lea esi, [eax + esi] // row1 pointer
+ lea ebx, [ebx * 4]
+ lea edi, [ebx + ebx * 2]
+
+ wloop:
+ movq xmm0, qword ptr [eax] // row0 4 pairs
+ movhps xmm0, qword ptr [eax + ebx]
+ movq xmm1, qword ptr [eax + ebx * 2]
+ movhps xmm1, qword ptr [eax + edi]
+ lea eax, [eax + ebx * 4]
+ movq xmm2, qword ptr [esi] // row1 4 pairs
+ movhps xmm2, qword ptr [esi + ebx]
+ movq xmm3, qword ptr [esi + ebx * 2]
+ movhps xmm3, qword ptr [esi + edi]
+ lea esi, [esi + ebx * 4]
+ pavgb xmm0, xmm2 // average rows
+ pavgb xmm1, xmm3
+ movdqa xmm2, xmm0 // average columns (8 to 4 pixels)
+ shufps xmm0, xmm1, 0x88 // even pixels
+ shufps xmm2, xmm1, 0xdd // odd pixels
+ pavgb xmm0, xmm2
+ movdqu [edx], xmm0
+ lea edx, [edx + 16]
+ sub ecx, 4
+ jg wloop
+
+ pop edi
+ pop esi
+ pop ebx
+ ret
+ }
+}
+
+// Column scaling unfiltered. SSE2 version.
+__declspec(naked)
+void ScaleARGBCols_SSE2(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ __asm {
+ push edi
+ push esi
+ mov edi, [esp + 8 + 4] // dst_argb
+ mov esi, [esp + 8 + 8] // src_argb
+ mov ecx, [esp + 8 + 12] // dst_width
+ movd xmm2, [esp + 8 + 16] // x
+ movd xmm3, [esp + 8 + 20] // dx
+
+ pshufd xmm2, xmm2, 0 // x0 x0 x0 x0
+ pshufd xmm0, xmm3, 0x11 // dx 0 dx 0
+ paddd xmm2, xmm0
+ paddd xmm3, xmm3 // 0, 0, 0, dx * 2
+ pshufd xmm0, xmm3, 0x05 // dx * 2, dx * 2, 0, 0
+ paddd xmm2, xmm0 // x3 x2 x1 x0
+ paddd xmm3, xmm3 // 0, 0, 0, dx * 4
+ pshufd xmm3, xmm3, 0 // dx * 4, dx * 4, dx * 4, dx * 4
+
+ pextrw eax, xmm2, 1 // get x0 integer.
+ pextrw edx, xmm2, 3 // get x1 integer.
+
+ cmp ecx, 0
+ jle xloop99
+ sub ecx, 4
+ jl xloop49
+
+ // 4 Pixel loop.
+ xloop4:
+ movd xmm0, [esi + eax * 4] // 1 source x0 pixels
+ movd xmm1, [esi + edx * 4] // 1 source x1 pixels
+ pextrw eax, xmm2, 5 // get x2 integer.
+ pextrw edx, xmm2, 7 // get x3 integer.
+ paddd xmm2, xmm3 // x += dx
+ punpckldq xmm0, xmm1 // x0 x1
+
+ movd xmm1, [esi + eax * 4] // 1 source x2 pixels
+ movd xmm4, [esi + edx * 4] // 1 source x3 pixels
+ pextrw eax, xmm2, 1 // get x0 integer. next iteration.
+ pextrw edx, xmm2, 3 // get x1 integer. next iteration.
+ punpckldq xmm1, xmm4 // x2 x3
+ punpcklqdq xmm0, xmm1 // x0 x1 x2 x3
+ movdqu [edi], xmm0
+ lea edi, [edi + 16]
+ sub ecx, 4 // 4 pixels
+ jge xloop4
+
+ xloop49:
+ test ecx, 2
+ je xloop29
+
+ // 2 Pixels.
+ movd xmm0, [esi + eax * 4] // 1 source x0 pixels
+ movd xmm1, [esi + edx * 4] // 1 source x1 pixels
+ pextrw eax, xmm2, 5 // get x2 integer.
+ punpckldq xmm0, xmm1 // x0 x1
+
+ movq qword ptr [edi], xmm0
+ lea edi, [edi + 8]
+
+ xloop29:
+ test ecx, 1
+ je xloop99
+
+    // 1 Pixel.
+ movd xmm0, [esi + eax * 4] // 1 source x2 pixels
+ movd dword ptr [edi], xmm0
+ xloop99:
+
+ pop esi
+ pop edi
+ ret
+ }
+}
+
+// Bilinear row filtering combines 2x1 -> 1x1. SSSE3 version.
+// TODO(fbarchard): Port to Neon
+
+// Shuffle table for arranging 2 pixels into pairs for pmaddubsw
+static uvec8 kShuffleColARGB = {
+ 0u, 4u, 1u, 5u, 2u, 6u, 3u, 7u, // bbggrraa 1st pixel
+ 8u, 12u, 9u, 13u, 10u, 14u, 11u, 15u // bbggrraa 2nd pixel
+};
+
+// Shuffle table for duplicating 2 fractions into 8 bytes each
+static uvec8 kShuffleFractions = {
+ 0u, 0u, 0u, 0u, 0u, 0u, 0u, 0u, 4u, 4u, 4u, 4u, 4u, 4u, 4u, 4u,
+};
+
+__declspec(naked)
+void ScaleARGBFilterCols_SSSE3(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ __asm {
+ push esi
+ push edi
+ mov edi, [esp + 8 + 4] // dst_argb
+ mov esi, [esp + 8 + 8] // src_argb
+ mov ecx, [esp + 8 + 12] // dst_width
+ movd xmm2, [esp + 8 + 16] // x
+ movd xmm3, [esp + 8 + 20] // dx
+ movdqa xmm4, kShuffleColARGB
+ movdqa xmm5, kShuffleFractions
+ pcmpeqb xmm6, xmm6 // generate 0x007f for inverting fraction.
+ psrlw xmm6, 9
+ pextrw eax, xmm2, 1 // get x0 integer. preroll
+ sub ecx, 2
+ jl xloop29
+
+ movdqa xmm0, xmm2 // x1 = x0 + dx
+ paddd xmm0, xmm3
+ punpckldq xmm2, xmm0 // x0 x1
+ punpckldq xmm3, xmm3 // dx dx
+ paddd xmm3, xmm3 // dx * 2, dx * 2
+ pextrw edx, xmm2, 3 // get x1 integer. preroll
+
+ // 2 Pixel loop.
+ xloop2:
+ movdqa xmm1, xmm2 // x0, x1 fractions.
+ paddd xmm2, xmm3 // x += dx
+ movq xmm0, qword ptr [esi + eax * 4] // 2 source x0 pixels
+ psrlw xmm1, 9 // 7 bit fractions.
+ movhps xmm0, qword ptr [esi + edx * 4] // 2 source x1 pixels
+ pshufb xmm1, xmm5 // 0000000011111111
+ pshufb xmm0, xmm4 // arrange pixels into pairs
+ pxor xmm1, xmm6 // 0..7f and 7f..0
+ pmaddubsw xmm0, xmm1 // argb_argb 16 bit, 2 pixels.
+ pextrw eax, xmm2, 1 // get x0 integer. next iteration.
+ pextrw edx, xmm2, 3 // get x1 integer. next iteration.
+ psrlw xmm0, 7 // argb 8.7 fixed point to low 8 bits.
+ packuswb xmm0, xmm0 // argb_argb 8 bits, 2 pixels.
+ movq qword ptr [edi], xmm0
+ lea edi, [edi + 8]
+ sub ecx, 2 // 2 pixels
+ jge xloop2
+
+ xloop29:
+
+ add ecx, 2 - 1
+ jl xloop99
+
+ // 1 pixel remainder
+ psrlw xmm2, 9 // 7 bit fractions.
+ movq xmm0, qword ptr [esi + eax * 4] // 2 source x0 pixels
+ pshufb xmm2, xmm5 // 00000000
+ pshufb xmm0, xmm4 // arrange pixels into pairs
+ pxor xmm2, xmm6 // 0..7f and 7f..0
+ pmaddubsw xmm0, xmm2 // argb 16 bit, 1 pixel.
+ psrlw xmm0, 7
+ packuswb xmm0, xmm0 // argb 8 bits, 1 pixel.
+ movd [edi], xmm0
+
+ xloop99:
+
+ pop edi
+ pop esi
+ ret
+ }
+}
+
+// Reads 4 pixels, duplicates them and writes 8 pixels.
+__declspec(naked)
+void ScaleARGBColsUp2_SSE2(uint8* dst_argb, const uint8* src_argb,
+ int dst_width, int x, int dx) {
+ __asm {
+ mov edx, [esp + 4] // dst_argb
+ mov eax, [esp + 8] // src_argb
+ mov ecx, [esp + 12] // dst_width
+
+ wloop:
+ movdqu xmm0, [eax]
+ lea eax, [eax + 16]
+ movdqa xmm1, xmm0
+ punpckldq xmm0, xmm0
+ punpckhdq xmm1, xmm1
+ movdqu [edx], xmm0
+ movdqu [edx + 16], xmm1
+ lea edx, [edx + 32]
+ sub ecx, 8
+ jg wloop
+
+ ret
+ }
+}
+
+// Divide num by div and return as 16.16 fixed point result.
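+// Roughly equivalent C: return (int)((((int64)num) << 16) / div);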
+__declspec(naked)
+int FixedDiv_X86(int num, int div) {
+ __asm {
+ mov eax, [esp + 4] // num
+ cdq // extend num to 64 bits
+ shld edx, eax, 16 // 32.16
+ shl eax, 16
+ idiv dword ptr [esp + 8]
+ ret
+ }
+}
+
+// Divide num by div and return as 16.16 fixed point result.
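+// Bias-adjusted variant for interpolation steps; roughly:
+//   return (int)(((((int64)num) << 16) - 0x00010001) / (div - 1));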
+__declspec(naked)
+int FixedDiv1_X86(int num, int div) {
+ __asm {
+ mov eax, [esp + 4] // num
+ mov ecx, [esp + 8] // denom
+ cdq // extend num to 64 bits
+ shld edx, eax, 16 // 32.16
+ shl eax, 16
+ sub eax, 0x00010001
+ sbb edx, 0
+ sub ecx, 1
+ idiv ecx
+ ret
+ }
+}
+#endif // !defined(LIBYUV_DISABLE_X86) && defined(_M_IX86)
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
diff --git a/media/libaom/src/third_party/libyuv/source/video_common.cc b/media/libaom/src/third_party/libyuv/source/video_common.cc
new file mode 100644
index 000000000..379a0669a
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/video_common.cc
@@ -0,0 +1,64 @@
+/*
+ * Copyright 2011 The LibYuv Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+
+#include "libyuv/video_common.h"
+
+#ifdef __cplusplus
+namespace libyuv {
+extern "C" {
+#endif
+
+#define ARRAY_SIZE(x) (int)(sizeof(x) / sizeof(x[0]))
+
+struct FourCCAliasEntry {
+ uint32 alias;
+ uint32 canonical;
+};
+
+static const struct FourCCAliasEntry kFourCCAliases[] = {
+ {FOURCC_IYUV, FOURCC_I420},
+ {FOURCC_YU16, FOURCC_I422},
+ {FOURCC_YU24, FOURCC_I444},
+ {FOURCC_YUYV, FOURCC_YUY2},
+ {FOURCC_YUVS, FOURCC_YUY2}, // kCMPixelFormat_422YpCbCr8_yuvs
+ {FOURCC_HDYC, FOURCC_UYVY},
+ {FOURCC_2VUY, FOURCC_UYVY}, // kCMPixelFormat_422YpCbCr8
+ {FOURCC_JPEG, FOURCC_MJPG}, // Note: JPEG has DHT while MJPG does not.
+ {FOURCC_DMB1, FOURCC_MJPG},
+ {FOURCC_BA81, FOURCC_BGGR}, // deprecated.
+ {FOURCC_RGB3, FOURCC_RAW },
+ {FOURCC_BGR3, FOURCC_24BG},
+ {FOURCC_CM32, FOURCC_BGRA}, // kCMPixelFormat_32ARGB
+ {FOURCC_CM24, FOURCC_RAW }, // kCMPixelFormat_24RGB
+ {FOURCC_L555, FOURCC_RGBO}, // kCMPixelFormat_16LE555
+ {FOURCC_L565, FOURCC_RGBP}, // kCMPixelFormat_16LE565
+ {FOURCC_5551, FOURCC_RGBO}, // kCMPixelFormat_16LE5551
+};
+// TODO(fbarchard): Consider mapping kCMPixelFormat_32BGRA to FOURCC_ARGB.
+// {FOURCC_BGRA, FOURCC_ARGB}, // kCMPixelFormat_32BGRA
+
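+// e.g. CanonicalFourCC(FOURCC_IYUV) returns FOURCC_I420; fourccs without an
+// alias entry are returned unchanged.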
+LIBYUV_API
+uint32 CanonicalFourCC(uint32 fourcc) {
+ int i;
+ for (i = 0; i < ARRAY_SIZE(kFourCCAliases); ++i) {
+ if (kFourCCAliases[i].alias == fourcc) {
+ return kFourCCAliases[i].canonical;
+ }
+ }
+ // Not an alias, so return it as-is.
+ return fourcc;
+}
+
+#ifdef __cplusplus
+} // extern "C"
+} // namespace libyuv
+#endif
+
diff --git a/media/libaom/src/third_party/libyuv/source/x86inc.asm b/media/libaom/src/third_party/libyuv/source/x86inc.asm
new file mode 100644
index 000000000..cb5c32df3
--- /dev/null
+++ b/media/libaom/src/third_party/libyuv/source/x86inc.asm
@@ -0,0 +1,1136 @@
+;*****************************************************************************
+;* x86inc.asm: x264asm abstraction layer
+;*****************************************************************************
+;* Copyright (C) 2005-2012 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;* Anton Mitrofanov <BugMaster@narod.ru>
+;* Jason Garrett-Glaser <darkshikari@gmail.com>
+;* Henrik Gramner <hengar-6@student.ltu.se>
+;*
+;* Permission to use, copy, modify, and/or distribute this software for any
+;* purpose with or without fee is hereby granted, provided that the above
+;* copyright notice and this permission notice appear in all copies.
+;*
+;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+;*****************************************************************************
+
+; This is a header file for the x264ASM assembly language, which uses
+; NASM/YASM syntax combined with a large number of macros to provide easy
+; abstraction between different calling conventions (x86_32, win64, linux64).
+; It also has various other useful features to simplify writing the kind of
+; DSP functions that are most often used in x264.
+
+; Unlike the rest of x264, this file is available under an ISC license, as it
+; has significant usefulness outside of x264 and we want it to be available
+; to the largest audience possible. Of course, if you modify it for your own
+; purposes to add a new feature, we strongly encourage contributing a patch
+; as this feature might be useful for others as well. Send patches or ideas
+; to x264-devel@videolan.org .
+
+; Local changes for libyuv:
+; remove %define program_name and references in labels
+; rename cpus to uppercase
+
+%define WIN64 0
+%define UNIX64 0
+%if ARCH_X86_64
+ %ifidn __OUTPUT_FORMAT__,win32
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,win64
+ %define WIN64 1
+ %else
+ %define UNIX64 1
+ %endif
+%endif
+
+%ifdef PREFIX
+ %define mangle(x) _ %+ x
+%else
+ %define mangle(x) x
+%endif
+
+; Name of the .rodata section.
+; Kludge: Something on OS X fails to align .rodata even given an align attribute,
+; so use a different read-only section.
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,macho64
+ SECTION .text align=%1
+ %elifidn __OUTPUT_FORMAT__,macho
+ SECTION .text align=%1
+ fakegot:
+ %elifidn __OUTPUT_FORMAT__,aout
+ section .text
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+
+; aout does not support align=
+%macro SECTION_TEXT 0-1 16
+ %ifidn __OUTPUT_FORMAT__,aout
+ SECTION .text
+ %else
+ SECTION .text align=%1
+ %endif
+%endmacro
+
+%if WIN64
+ %define PIC
+%elif ARCH_X86_64 == 0
+; x86_32 doesn't require PIC.
+; Some distros prefer shared objects to be PIC, but nothing breaks if
+; the code contains a few textrels, so we'll skip that complexity.
+ %undef PIC
+%endif
+%ifdef PIC
+ default rel
+%endif
+
+; Always use long nops (reduces 0x90 spam in disassembly on x86_32)
+CPU amdnop
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,0, dst, src, tmp
+; declares a function (foo), taking two args (dst and src) and one local variable (tmp)
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need a more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE, and returns.
+
+; REP_RET:
+; Same, but if it doesn't pop anything it becomes a 2-byte ret, for athlons
+; which are slow when a normal ret follows a branch.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNh is the high 8 bits of the word size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
+
+%macro DECLARE_REG 2-3
+ %define r%1q %2
+ %define r%1d %2d
+ %define r%1w %2w
+ %define r%1b %2b
+ %define r%1h %2h
+ %if %0 == 2
+ %define r%1m %2d
+ %define r%1mp %2
+ %elif ARCH_X86_64 ; memory
+ %define r%1m [rsp + stack_offset + %3]
+ %define r%1mp qword r %+ %1m
+ %else
+ %define r%1m [esp + stack_offset + %3]
+ %define r%1mp dword r %+ %1m
+ %endif
+ %define r%1 %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 3
+ %define r%1q r%1
+ %define e%1q r%1
+ %define r%1d e%1
+ %define e%1d e%1
+ %define r%1w %1
+ %define e%1w %1
+ %define r%1h %3
+ %define e%1h %3
+ %define r%1b %2
+ %define e%1b %2
+%if ARCH_X86_64 == 0
+ %define r%1 e%1
+%endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al, ah
+DECLARE_REG_SIZE bx, bl, bh
+DECLARE_REG_SIZE cx, cl, ch
+DECLARE_REG_SIZE dx, dl, dh
+DECLARE_REG_SIZE si, sil, null
+DECLARE_REG_SIZE di, dil, null
+DECLARE_REG_SIZE bp, bpl, null
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+ %assign %%i 0
+ %rep %0
+ CAT_XDEFINE t, %%i, r%1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+ %rep %0
+ %define t%1q t%1 %+ q
+ %define t%1d t%1 %+ d
+ %define t%1w t%1 %+ w
+ %define t%1h t%1 %+ h
+ %define t%1b t%1 %+ b
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
+
+%if ARCH_X86_64
+ %define gprsize 8
+%else
+ %define gprsize 4
+%endif
+
+%macro PUSH 1
+ push %1
+ %assign stack_offset stack_offset+gprsize
+%endmacro
+
+%macro POP 1
+ pop %1
+ %assign stack_offset stack_offset-gprsize
+%endmacro
+
+%macro PUSH_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ PUSH r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro POP_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ pop r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro LOAD_IF_USED 1-*
+ %rep %0
+ %if %1 < num_args
+ mov r%1, r %+ %1 %+ mp
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro SUB 2
+ sub %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset+(%2)
+ %endif
+%endmacro
+
+%macro ADD 2
+ add %1, %2
+ %ifidn %1, rsp
+ %assign stack_offset stack_offset-(%2)
+ %endif
+%endmacro
+
+%macro movifnidn 2
+ %ifnidn %1, %2
+ mov %1, %2
+ %endif
+%endmacro
+
+%macro movsxdifnidn 2
+ %ifnidn %1, %2
+ movsxd %1, %2
+ %endif
+%endmacro
+
+%macro ASSERT 1
+ %if (%1) == 0
+ %error assert failed
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+ %ifdef n_arg_names
+ %assign %%i 0
+ %rep n_arg_names
+ CAT_UNDEF arg_name %+ %%i, q
+ CAT_UNDEF arg_name %+ %%i, d
+ CAT_UNDEF arg_name %+ %%i, w
+ CAT_UNDEF arg_name %+ %%i, h
+ CAT_UNDEF arg_name %+ %%i, b
+ CAT_UNDEF arg_name %+ %%i, m
+ CAT_UNDEF arg_name %+ %%i, mp
+ CAT_UNDEF arg_name, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+
+ %xdefine %%stack_offset stack_offset
+ %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
+ %assign %%i 0
+ %rep %0
+ %xdefine %1q r %+ %%i %+ q
+ %xdefine %1d r %+ %%i %+ d
+ %xdefine %1w r %+ %%i %+ w
+ %xdefine %1h r %+ %%i %+ h
+ %xdefine %1b r %+ %%i %+ b
+ %xdefine %1m r %+ %%i %+ m
+ %xdefine %1mp r %+ %%i %+ mp
+ CAT_XDEFINE arg_name, %%i, %1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+ %xdefine stack_offset %%stack_offset
+ %assign n_arg_names %0
+%endmacro
+
+%if WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0, rcx
+DECLARE_REG 1, rdx
+DECLARE_REG 2, R8
+DECLARE_REG 3, R9
+DECLARE_REG 4, R10, 40
+DECLARE_REG 5, R11, 48
+DECLARE_REG 6, rax, 56
+DECLARE_REG 7, rdi, 64
+DECLARE_REG 8, rsi, 72
+DECLARE_REG 9, rbx, 80
+DECLARE_REG 10, rbp, 88
+DECLARE_REG 11, R12, 96
+DECLARE_REG 12, R13, 104
+DECLARE_REG 13, R14, 112
+DECLARE_REG 14, R15, 120
+
+%macro PROLOGUE 2-4+ 0 ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
+ %if mmsize == 8
+ %assign xmm_regs_used 0
+ %else
+ WIN64_SPILL_XMM %3
+ %endif
+ LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ DEFINE_ARGS %4
+%endmacro
+
+%macro WIN64_SPILL_XMM 1
+ %assign xmm_regs_used %1
+ ASSERT xmm_regs_used <= 16
+ %if xmm_regs_used > 6
+ SUB rsp, (xmm_regs_used-6)*16+16
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa [rsp + (%%i-6)*16+(~stack_offset&8)], xmm %+ %%i
+ %endrep
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM_INTERNAL 1
+ %if xmm_regs_used > 6
+ %assign %%i xmm_regs_used
+ %rep (xmm_regs_used-6)
+ %assign %%i %%i-1
+ movdqa xmm %+ %%i, [%1 + (%%i-6)*16+(~stack_offset&8)]
+ %endrep
+ add %1, (xmm_regs_used-6)*16+16
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM 1
+ WIN64_RESTORE_XMM_INTERNAL %1
+ %assign stack_offset stack_offset-(xmm_regs_used-6)*16+16
+ %assign xmm_regs_used 0
+%endmacro
+
+%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32
+
+%macro RET 0
+ WIN64_RESTORE_XMM_INTERNAL rsp
+ POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
+%if mmsize == 32
+ vzeroupper
+%endif
+ ret
+%endmacro
+
+%elif ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0, rdi
+DECLARE_REG 1, rsi
+DECLARE_REG 2, rdx
+DECLARE_REG 3, rcx
+DECLARE_REG 4, R8
+DECLARE_REG 5, R9
+DECLARE_REG 6, rax, 8
+DECLARE_REG 7, R10, 16
+DECLARE_REG 8, R11, 24
+DECLARE_REG 9, rbx, 32
+DECLARE_REG 10, rbp, 40
+DECLARE_REG 11, R12, 48
+DECLARE_REG 12, R13, 56
+DECLARE_REG 13, R14, 64
+DECLARE_REG 14, R15, 72
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 9, 10, 11, 12, 13, 14
+ LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
+ DEFINE_ARGS %4
+%endmacro
+
+%define has_epilogue regs_used > 9 || mmsize == 32
+
+%macro RET 0
+ POP_IF_USED 14, 13, 12, 11, 10, 9
+%if mmsize == 32
+ vzeroupper
+%endif
+ ret
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, 4
+DECLARE_REG 1, ecx, 8
+DECLARE_REG 2, edx, 12
+DECLARE_REG 3, ebx, 16
+DECLARE_REG 4, esi, 20
+DECLARE_REG 5, edi, 24
+DECLARE_REG 6, ebp, 28
+%define rsp esp
+
+%macro DECLARE_ARG 1-*
+ %rep %0
+ %define r%1m [esp + stack_offset + 4*%1 + 4]
+ %define r%1mp dword r%1m
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
+
+%macro PROLOGUE 2-4+ ; #args, #regs, #xmm_regs, arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ %if regs_used > 7
+ %assign regs_used 7
+ %endif
+ ASSERT regs_used >= num_args
+ PUSH_IF_USED 3, 4, 5, 6
+ LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
+ DEFINE_ARGS %4
+%endmacro
+
+%define has_epilogue regs_used > 3 || mmsize == 32
+
+%macro RET 0
+ POP_IF_USED 6, 5, 4, 3
+%if mmsize == 32
+ vzeroupper
+%endif
+ ret
+%endmacro
+
+%endif ;======================================================================
+
+%if WIN64 == 0
+%macro WIN64_SPILL_XMM 1
+%endmacro
+%macro WIN64_RESTORE_XMM 1
+%endmacro
+%endif
+
+%macro REP_RET 0
+ %if has_epilogue
+ RET
+ %else
+ rep ret
+ %endif
+%endmacro
+
+%macro TAIL_CALL 2 ; callee, is_nonadjacent
+ %if has_epilogue
+ call %1
+ RET
+ %elif %2
+ jmp %1
+ %endif
+%endmacro
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Begin a function.
+; Applies any symbol mangling needed for C linkage, and sets up a define such that
+; subsequent uses of the function name automatically refer to the mangled version.
+; Appends cpuflags to the function name if cpuflags has been specified.
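+; Hypothetical usage sketch (function and argument names are illustrative):
+;   INIT_XMM SSE2
+;   cglobal scale_row, 3,3,2, dst, src, width  ; defines scale_row_SSE2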
+%macro cglobal 1-2+ ; name, [PROLOGUE args]
+%if %0 == 1
+ cglobal_internal %1 %+ SUFFIX
+%else
+ cglobal_internal %1 %+ SUFFIX, %2
+%endif
+%endmacro
+%macro cglobal_internal 1-2+
+ %ifndef cglobaled_%1
+ %xdefine %1 mangle(%1)
+ %xdefine %1.skip_prologue %1 %+ .skip_prologue
+ CAT_XDEFINE cglobaled_, %1, 1
+ %endif
+ %xdefine current_function %1
+ %ifidn __OUTPUT_FORMAT__,elf
+ global %1:function hidden
+ %else
+ global %1
+ %endif
+ align function_align
+ %1:
+ RESET_MM_PERMUTATION ; not really needed, but makes disassembly somewhat nicer
+ %assign stack_offset 0
+ %if %0 > 1
+ PROLOGUE %2
+ %endif
+%endmacro
+
+%macro cextern 1
+ %xdefine %1 mangle(%1)
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+; like cextern, but without the prefix
+%macro cextern_naked 1
+ %xdefine %1 mangle(%1)
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+%macro const 2+
+ %xdefine %1 mangle(%1)
+ global %1
+ %1: %2
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is
+; executable by default.
+%ifidn __OUTPUT_FORMAT__,elf
+SECTION .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+%ifidn __OUTPUT_FORMAT__,elf32
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+%ifidn __OUTPUT_FORMAT__,elf64
+section .note.GNU-stack noalloc noexec nowrite progbits
+%endif
+
+; cpuflags
+
+%assign cpuflags_MMX (1<<0)
+%assign cpuflags_MMX2 (1<<1) | cpuflags_MMX
+%assign cpuflags_3dnow (1<<2) | cpuflags_MMX
+%assign cpuflags_3dnow2 (1<<3) | cpuflags_3dnow
+%assign cpuflags_SSE (1<<4) | cpuflags_MMX2
+%assign cpuflags_SSE2 (1<<5) | cpuflags_SSE
+%assign cpuflags_SSE2slow (1<<6) | cpuflags_SSE2
+%assign cpuflags_SSE3 (1<<7) | cpuflags_SSE2
+%assign cpuflags_SSSE3 (1<<8) | cpuflags_SSE3
+%assign cpuflags_SSE4 (1<<9) | cpuflags_SSSE3
+%assign cpuflags_SSE42 (1<<10)| cpuflags_SSE4
+%assign cpuflags_AVX (1<<11)| cpuflags_SSE42
+%assign cpuflags_xop (1<<12)| cpuflags_AVX
+%assign cpuflags_fma4 (1<<13)| cpuflags_AVX
+%assign cpuflags_AVX2 (1<<14)| cpuflags_AVX
+%assign cpuflags_fma3 (1<<15)| cpuflags_AVX
+
+%assign cpuflags_cache32 (1<<16)
+%assign cpuflags_cache64 (1<<17)
+%assign cpuflags_slowctz (1<<18)
+%assign cpuflags_lzcnt (1<<19)
+%assign cpuflags_misalign (1<<20)
+%assign cpuflags_aligned (1<<21) ; not a cpu feature, but a function variant
+%assign cpuflags_atom (1<<22)
+%assign cpuflags_bmi1 (1<<23)
+%assign cpuflags_bmi2 (1<<24)|cpuflags_bmi1
+%assign cpuflags_tbm (1<<25)|cpuflags_bmi1
+
+%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x))
+%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x))
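The cpuflag() test checks that every bit of the requested feature set is present, which is why each flag above ORs in the flags it implies. A minimal C sketch of the same subset test (the enum values below mirror a few of the %assign lines; names are illustrative, not part of x86inc):

    #include <stdio.h>

    /* Each feature ORs in its prerequisites, as in the %assign chain above. */
    enum {
      FLAG_MMX  = 1 << 0,
      FLAG_MMX2 = (1 << 1) | FLAG_MMX,
      FLAG_SSE  = (1 << 4) | FLAG_MMX2,
      FLAG_SSE2 = (1 << 5) | FLAG_SSE
    };

    /* Subset test: true iff all bits of `need` are set in `have`. */
    static int cpuflag(unsigned have, unsigned need) {
      return (have & need) == need;
    }

    int main(void) {
      printf("%d\n", cpuflag(FLAG_SSE2, FLAG_MMX));  /* 1: SSE2 implies MMX */
      printf("%d\n", cpuflag(FLAG_SSE, FLAG_SSE2));  /* 0: SSE lacks SSE2  */
      return 0;
    }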
+
+; Takes up to 2 cpuflags from the above list.
+; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
+; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co.
+%macro INIT_CPUFLAGS 0-2
+ %if %0 >= 1
+ %xdefine cpuname %1
+ %assign cpuflags cpuflags_%1
+ %if %0 >= 2
+ %xdefine cpuname %1_%2
+ %assign cpuflags cpuflags | cpuflags_%2
+ %endif
+ %xdefine SUFFIX _ %+ cpuname
+ %if cpuflag(AVX)
+ %assign AVX_enabled 1
+ %endif
+ %if mmsize == 16 && notcpuflag(SSE2)
+ %define mova movaps
+ %define movu movups
+ %define movnta movntps
+ %endif
+ %if cpuflag(aligned)
+ %define movu mova
+ %elifidn %1, SSE3
+ %define movu lddqu
+ %endif
+ %else
+ %xdefine SUFFIX
+ %undef cpuname
+ %undef cpuflags
+ %endif
+%endmacro
+
+; merge MMX and SSE*
+
+%macro CAT_XDEFINE 3
+ %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+ %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0-1+
+ %assign AVX_enabled 0
+ %define RESET_MM_PERMUTATION INIT_MMX %1
+ %define mmsize 8
+ %define num_mmregs 8
+ %define mova movq
+ %define movu movq
+ %define movh movd
+ %define movnta movntq
+ %assign %%i 0
+ %rep 8
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %rep 8
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nmm, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+%macro INIT_XMM 0-1+
+ %assign AVX_enabled 0
+ %define RESET_MM_PERMUTATION INIT_XMM %1
+ %define mmsize 16
+ %define num_mmregs 8
+ %if ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova movdqa
+ %define movu movdqu
+ %define movh movq
+ %define movnta movntdq
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nxmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+%macro INIT_YMM 0-1+
+ %assign AVX_enabled 1
+ %define RESET_MM_PERMUTATION INIT_YMM %1
+ %define mmsize 32
+ %define num_mmregs 8
+ %if ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova vmovaps
+ %define movu vmovups
+ %undef movh
+ %define movnta vmovntps
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, ymm %+ %%i
+ CAT_XDEFINE nymm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+INIT_XMM
+
+; I often want to use macros that permute their arguments. e.g. there's no
+; efficient way to implement butterfly or transpose or dct without swapping some
+; arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+%rep %0/2
+ %xdefine tmp%2 m%2
+ %xdefine ntmp%2 nm%2
+ %rotate 2
+%endrep
+%rep %0/2
+ %xdefine m%1 tmp%2
+ %xdefine nm%1 ntmp%2
+ %undef tmp%2
+ %undef ntmp%2
+ %rotate 2
+%endrep
+%endmacro
+
+%macro SWAP 2-* ; swaps a single chain (sometimes more concise than pairs)
+%rep %0-1
+%ifdef m%1
+ %xdefine tmp m%1
+ %xdefine m%1 m%2
+ %xdefine m%2 tmp
+ CAT_XDEFINE n, m%1, %1
+ CAT_XDEFINE n, m%2, %2
+%else
+    ; If we were called as "SWAP m0,m1" rather than "SWAP 0,1", infer the original numbers here.
+ ; Be careful using this mode in nested macros though, as in some cases there may be
+ ; other copies of m# that have already been dereferenced and don't get updated correctly.
+ %xdefine %%n1 n %+ %1
+ %xdefine %%n2 n %+ %2
+ %xdefine tmp m %+ %%n1
+ CAT_XDEFINE m, %%n1, m %+ %%n2
+ CAT_XDEFINE m, %%n2, tmp
+ CAT_XDEFINE n, m %+ %%n1, %%n1
+ CAT_XDEFINE n, m %+ %%n2, %%n2
+%endif
+ %undef tmp
+ %rotate 1
+%endrep
+%endmacro
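The zero-cost nature of SWAP can be modelled in C by exchanging name-to-register bindings rather than register contents; a rough conceptual sketch (the table and helper are illustrative only):

    /* m[i] maps the logical name "m<i>" to a physical register id. SWAP
       exchanges the bindings, not the data the registers hold. */
    static int m[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

    static void swap_names(int a, int b) {
      int tmp = m[a];
      m[a] = m[b];
      m[b] = tmp; /* no data moves: later code simply emits the other register */
    }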
+
+; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
+; calls to that function will automatically load the permutation, so values can
+; be returned in mmregs.
+%macro SAVE_MM_PERMUTATION 0-1
+ %if %0
+ %xdefine %%f %1_m
+ %else
+ %xdefine %%f current_function %+ _m
+ %endif
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE %%f, %%i, m %+ %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1 ; name to load from
+ %ifdef %1_m0
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1_m %+ %%i
+ CAT_XDEFINE n, m %+ %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+%endmacro
+
+; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
+%macro call 1
+ call_internal %1, %1 %+ SUFFIX
+%endmacro
+%macro call_internal 2
+ %xdefine %%i %1
+ %ifndef cglobaled_%1
+ %ifdef cglobaled_%2
+ %xdefine %%i %2
+ %endif
+ %endif
+ call %%i
+ LOAD_MM_PERMUTATION %%i
+%endmacro
+
+; Substitutions that reduce instruction size but are functionally equivalent
+%macro add 2
+ %ifnum %2
+ %if %2==128
+ sub %1, -128
+ %else
+ add %1, %2
+ %endif
+ %else
+ add %1, %2
+ %endif
+%endmacro
+
+%macro sub 2
+ %ifnum %2
+ %if %2==128
+ add %1, -128
+ %else
+ sub %1, %2
+ %endif
+ %else
+ sub %1, %2
+ %endif
+%endmacro
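The special case exists because x86 immediates in the range -128..127 can be encoded as a single sign-extended byte: +128 falls just outside that range while -128 falls inside it, so `sub r, -128` gets a 1-byte immediate where `add r, 128` needs a full-width one. A quick C check of the range logic (illustrative only):

    /* True iff `imm` fits in a sign-extended 8-bit immediate. */
    static int fits_imm8(long imm) { return imm >= -128 && imm <= 127; }

    /* fits_imm8(128) == 0 but fits_imm8(-128) == 1, hence 128 is the one
       value worth rewriting as its negation. */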
+
+;=============================================================================
+; AVX abstraction layer
+;=============================================================================
+
+%assign i 0
+%rep 16
+ %if i < 8
+ CAT_XDEFINE sizeofmm, i, 8
+ %endif
+ CAT_XDEFINE sizeofxmm, i, 16
+ CAT_XDEFINE sizeofymm, i, 32
+%assign i i+1
+%endrep
+%undef i
+
+%macro CHECK_AVX_INSTR_EMU 3-*
+ %xdefine %%opcode %1
+ %xdefine %%dst %2
+ %rep %0-2
+ %ifidn %%dst, %3
+ %error non-AVX emulation of ``%%opcode'' is not supported
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+;%1 == instruction
+;%2 == 1 if float, 0 if int
+;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
+;%4 == number of operands given
+;%5+: operands
+%macro RUN_AVX_INSTR 6-7+
+ %ifid %6
+ %define %%sizeofreg sizeof%6
+ %elifid %5
+ %define %%sizeofreg sizeof%5
+ %else
+ %define %%sizeofreg mmsize
+ %endif
+ %if %%sizeofreg==32
+ %if %4>=3
+ v%1 %5, %6, %7
+ %else
+ v%1 %5, %6
+ %endif
+ %else
+ %if %%sizeofreg==8
+ %define %%regmov movq
+ %elif %2
+ %define %%regmov movaps
+ %else
+ %define %%regmov movdqa
+ %endif
+
+ %if %4>=3+%3
+ %ifnidn %5, %6
+ %if AVX_enabled && %%sizeofreg==16
+ v%1 %5, %6, %7
+ %else
+ CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7
+ %%regmov %5, %6
+ %1 %5, %7
+ %endif
+ %else
+ %1 %5, %7
+ %endif
+ %elif %4>=3
+ %1 %5, %6, %7
+ %else
+ %1 %5, %6
+ %endif
+ %endif
+%endmacro
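In words: with AVX available, a three-operand op is emitted directly; otherwise the macro falls back to a register move plus the destructive two-operand SSE form. A self-contained C sketch of that decision (a model of the logic, not the emitter itself):

    #include <stdio.h>
    #include <string.h>

    static int avx_enabled = 0;

    /* Sketch: emit `dst = src1 OP src2`, falling back to mov + 2-operand
       form when 3-operand AVX encodings are unavailable. */
    static void emit_op3(const char *op, const char *dst, const char *src1,
                         const char *src2) {
      if (avx_enabled) {
        printf("v%s %s, %s, %s\n", op, dst, src1, src2);
      } else {
        if (strcmp(dst, src1) != 0) printf("movdqa %s, %s\n", dst, src1);
        printf("%s %s, %s\n", op, dst, src2);
      }
    }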
+
+; 3arg AVX ops with a memory arg can only have it in src2,
+; whereas SSE emulation of 3arg prefers to have it in src1 (i.e. the mov).
+; So, if the op is symmetric and the wrong one is memory, swap them.
+%macro RUN_AVX_INSTR1 8
+ %assign %%swap 0
+ %if AVX_enabled
+ %ifnid %6
+ %assign %%swap 1
+ %endif
+ %elifnidn %5, %6
+ %ifnid %7
+ %assign %%swap 1
+ %endif
+ %endif
+ %if %%swap && %3 == 0 && %8 == 1
+ RUN_AVX_INSTR %1, %2, %3, %4, %5, %7, %6
+ %else
+ RUN_AVX_INSTR %1, %2, %3, %4, %5, %6, %7
+ %endif
+%endmacro
+
+;%1 == instruction
+;%2 == 1 if float, 0 if int
+;%3 == 1 if 4-operand (xmm, xmm, xmm, imm), 0 if 2- or 3-operand (xmm, xmm, xmm)
+;%4 == 1 if symmetric (i.e. doesn't matter which src arg is which), 0 if not
+%macro AVX_INSTR 4
+ %macro %1 2-9 fnord, fnord, fnord, %1, %2, %3, %4
+ %ifidn %3, fnord
+ RUN_AVX_INSTR %6, %7, %8, 2, %1, %2
+ %elifidn %4, fnord
+ RUN_AVX_INSTR1 %6, %7, %8, 3, %1, %2, %3, %9
+ %elifidn %5, fnord
+ RUN_AVX_INSTR %6, %7, %8, 4, %1, %2, %3, %4
+ %else
+ RUN_AVX_INSTR %6, %7, %8, 5, %1, %2, %3, %4, %5
+ %endif
+ %endmacro
+%endmacro
+
+AVX_INSTR addpd, 1, 0, 1
+AVX_INSTR addps, 1, 0, 1
+AVX_INSTR addsd, 1, 0, 1
+AVX_INSTR addss, 1, 0, 1
+AVX_INSTR addsubpd, 1, 0, 0
+AVX_INSTR addsubps, 1, 0, 0
+AVX_INSTR andpd, 1, 0, 1
+AVX_INSTR andps, 1, 0, 1
+AVX_INSTR andnpd, 1, 0, 0
+AVX_INSTR andnps, 1, 0, 0
+AVX_INSTR blendpd, 1, 0, 0
+AVX_INSTR blendps, 1, 0, 0
+AVX_INSTR blendvpd, 1, 0, 0
+AVX_INSTR blendvps, 1, 0, 0
+AVX_INSTR cmppd, 1, 0, 0
+AVX_INSTR cmpps, 1, 0, 0
+AVX_INSTR cmpsd, 1, 0, 0
+AVX_INSTR cmpss, 1, 0, 0
+AVX_INSTR cvtdq2ps, 1, 0, 0
+AVX_INSTR cvtps2dq, 1, 0, 0
+AVX_INSTR divpd, 1, 0, 0
+AVX_INSTR divps, 1, 0, 0
+AVX_INSTR divsd, 1, 0, 0
+AVX_INSTR divss, 1, 0, 0
+AVX_INSTR dppd, 1, 1, 0
+AVX_INSTR dpps, 1, 1, 0
+AVX_INSTR haddpd, 1, 0, 0
+AVX_INSTR haddps, 1, 0, 0
+AVX_INSTR hsubpd, 1, 0, 0
+AVX_INSTR hsubps, 1, 0, 0
+AVX_INSTR maxpd, 1, 0, 1
+AVX_INSTR maxps, 1, 0, 1
+AVX_INSTR maxsd, 1, 0, 1
+AVX_INSTR maxss, 1, 0, 1
+AVX_INSTR minpd, 1, 0, 1
+AVX_INSTR minps, 1, 0, 1
+AVX_INSTR minsd, 1, 0, 1
+AVX_INSTR minss, 1, 0, 1
+AVX_INSTR movhlps, 1, 0, 0
+AVX_INSTR movlhps, 1, 0, 0
+AVX_INSTR movsd, 1, 0, 0
+AVX_INSTR movss, 1, 0, 0
+AVX_INSTR mpsadbw, 0, 1, 0
+AVX_INSTR mulpd, 1, 0, 1
+AVX_INSTR mulps, 1, 0, 1
+AVX_INSTR mulsd, 1, 0, 1
+AVX_INSTR mulss, 1, 0, 1
+AVX_INSTR orpd, 1, 0, 1
+AVX_INSTR orps, 1, 0, 1
+AVX_INSTR pabsb, 0, 0, 0
+AVX_INSTR pabsw, 0, 0, 0
+AVX_INSTR pabsd, 0, 0, 0
+AVX_INSTR packsswb, 0, 0, 0
+AVX_INSTR packssdw, 0, 0, 0
+AVX_INSTR packuswb, 0, 0, 0
+AVX_INSTR packusdw, 0, 0, 0
+AVX_INSTR paddb, 0, 0, 1
+AVX_INSTR paddw, 0, 0, 1
+AVX_INSTR paddd, 0, 0, 1
+AVX_INSTR paddq, 0, 0, 1
+AVX_INSTR paddsb, 0, 0, 1
+AVX_INSTR paddsw, 0, 0, 1
+AVX_INSTR paddusb, 0, 0, 1
+AVX_INSTR paddusw, 0, 0, 1
+AVX_INSTR palignr, 0, 1, 0
+AVX_INSTR pand, 0, 0, 1
+AVX_INSTR pandn, 0, 0, 0
+AVX_INSTR pavgb, 0, 0, 1
+AVX_INSTR pavgw, 0, 0, 1
+AVX_INSTR pblendvb, 0, 0, 0
+AVX_INSTR pblendw, 0, 1, 0
+AVX_INSTR pcmpestri, 0, 0, 0
+AVX_INSTR pcmpestrm, 0, 0, 0
+AVX_INSTR pcmpistri, 0, 0, 0
+AVX_INSTR pcmpistrm, 0, 0, 0
+AVX_INSTR pcmpeqb, 0, 0, 1
+AVX_INSTR pcmpeqw, 0, 0, 1
+AVX_INSTR pcmpeqd, 0, 0, 1
+AVX_INSTR pcmpeqq, 0, 0, 1
+AVX_INSTR pcmpgtb, 0, 0, 0
+AVX_INSTR pcmpgtw, 0, 0, 0
+AVX_INSTR pcmpgtd, 0, 0, 0
+AVX_INSTR pcmpgtq, 0, 0, 0
+AVX_INSTR phaddw, 0, 0, 0
+AVX_INSTR phaddd, 0, 0, 0
+AVX_INSTR phaddsw, 0, 0, 0
+AVX_INSTR phsubw, 0, 0, 0
+AVX_INSTR phsubd, 0, 0, 0
+AVX_INSTR phsubsw, 0, 0, 0
+AVX_INSTR pmaddwd, 0, 0, 1
+AVX_INSTR pmaddubsw, 0, 0, 0
+AVX_INSTR pmaxsb, 0, 0, 1
+AVX_INSTR pmaxsw, 0, 0, 1
+AVX_INSTR pmaxsd, 0, 0, 1
+AVX_INSTR pmaxub, 0, 0, 1
+AVX_INSTR pmaxuw, 0, 0, 1
+AVX_INSTR pmaxud, 0, 0, 1
+AVX_INSTR pminsb, 0, 0, 1
+AVX_INSTR pminsw, 0, 0, 1
+AVX_INSTR pminsd, 0, 0, 1
+AVX_INSTR pminub, 0, 0, 1
+AVX_INSTR pminuw, 0, 0, 1
+AVX_INSTR pminud, 0, 0, 1
+AVX_INSTR pmovmskb, 0, 0, 0
+AVX_INSTR pmulhuw, 0, 0, 1
+AVX_INSTR pmulhrsw, 0, 0, 1
+AVX_INSTR pmulhw, 0, 0, 1
+AVX_INSTR pmullw, 0, 0, 1
+AVX_INSTR pmulld, 0, 0, 1
+AVX_INSTR pmuludq, 0, 0, 1
+AVX_INSTR pmuldq, 0, 0, 1
+AVX_INSTR por, 0, 0, 1
+AVX_INSTR psadbw, 0, 0, 1
+AVX_INSTR pshufb, 0, 0, 0
+AVX_INSTR pshufd, 0, 1, 0
+AVX_INSTR pshufhw, 0, 1, 0
+AVX_INSTR pshuflw, 0, 1, 0
+AVX_INSTR psignb, 0, 0, 0
+AVX_INSTR psignw, 0, 0, 0
+AVX_INSTR psignd, 0, 0, 0
+AVX_INSTR psllw, 0, 0, 0
+AVX_INSTR pslld, 0, 0, 0
+AVX_INSTR psllq, 0, 0, 0
+AVX_INSTR pslldq, 0, 0, 0
+AVX_INSTR psraw, 0, 0, 0
+AVX_INSTR psrad, 0, 0, 0
+AVX_INSTR psrlw, 0, 0, 0
+AVX_INSTR psrld, 0, 0, 0
+AVX_INSTR psrlq, 0, 0, 0
+AVX_INSTR psrldq, 0, 0, 0
+AVX_INSTR psubb, 0, 0, 0
+AVX_INSTR psubw, 0, 0, 0
+AVX_INSTR psubd, 0, 0, 0
+AVX_INSTR psubq, 0, 0, 0
+AVX_INSTR psubsb, 0, 0, 0
+AVX_INSTR psubsw, 0, 0, 0
+AVX_INSTR psubusb, 0, 0, 0
+AVX_INSTR psubusw, 0, 0, 0
+AVX_INSTR ptest, 0, 0, 0
+AVX_INSTR punpckhbw, 0, 0, 0
+AVX_INSTR punpckhwd, 0, 0, 0
+AVX_INSTR punpckhdq, 0, 0, 0
+AVX_INSTR punpckhqdq, 0, 0, 0
+AVX_INSTR punpcklbw, 0, 0, 0
+AVX_INSTR punpcklwd, 0, 0, 0
+AVX_INSTR punpckldq, 0, 0, 0
+AVX_INSTR punpcklqdq, 0, 0, 0
+AVX_INSTR pxor, 0, 0, 1
+AVX_INSTR shufps, 1, 1, 0
+AVX_INSTR subpd, 1, 0, 0
+AVX_INSTR subps, 1, 0, 0
+AVX_INSTR subsd, 1, 0, 0
+AVX_INSTR subss, 1, 0, 0
+AVX_INSTR unpckhpd, 1, 0, 0
+AVX_INSTR unpckhps, 1, 0, 0
+AVX_INSTR unpcklpd, 1, 0, 0
+AVX_INSTR unpcklps, 1, 0, 0
+AVX_INSTR xorpd, 1, 0, 1
+AVX_INSTR xorps, 1, 0, 1
+
+; 3DNow instructions, for sharing code between AVX, SSE and 3DN
+AVX_INSTR pfadd, 1, 0, 1
+AVX_INSTR pfsub, 1, 0, 0
+AVX_INSTR pfmul, 1, 0, 1
+
+; base-4 constants for shuffles
+%assign i 0
+%rep 256
+ %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
+ %if j < 10
+ CAT_XDEFINE q000, j, i
+ %elif j < 100
+ CAT_XDEFINE q00, j, i
+ %elif j < 1000
+ CAT_XDEFINE q0, j, i
+ %else
+ CAT_XDEFINE q, j, i
+ %endif
+%assign i i+1
+%endrep
+%undef i
+%undef j
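Each generated name qABCD (digits 0-3) spells out, most significant field first, the four 2-bit source indices packed into an 8-bit shuffle immediate. The same mapping in C (an illustrative helper, not part of the file):

    /* Pack four 2-bit selectors (a = top field) into a shuffle immediate,
       so shuf_imm(3, 1, 2, 0) yields the value of the constant named q3120. */
    static unsigned shuf_imm(unsigned a, unsigned b, unsigned c, unsigned d) {
      return (a << 6) | (b << 4) | (c << 2) | d;
    }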
+
+%macro FMA_INSTR 3
+ %macro %1 4-7 %1, %2, %3
+ %if cpuflag(xop)
+ v%5 %1, %2, %3, %4
+ %else
+ %6 %1, %2, %3
+ %7 %1, %4
+ %endif
+ %endmacro
+%endmacro
+
+FMA_INSTR pmacsdd, pmulld, paddd
+FMA_INSTR pmacsww, pmullw, paddw
+FMA_INSTR pmadcswd, pmaddwd, paddd
+
+; tzcnt is equivalent to "rep bsf" and is backwards-compatible with bsf.
+; This lets us use tzcnt without bumping the yasm version requirement yet.
+%define tzcnt rep bsf
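tzcnt counts trailing zero bits and is encoded as bsf with a rep prefix, which pre-tzcnt CPUs simply ignore, so the define is safe everywhere the results agree. For reference, a C-level equivalent using a GCC/Clang builtin (illustrative only):

    /* Count trailing zeros; matches tzcnt for nonzero inputs. For zero, bsf
       leaves its destination undefined while tzcnt returns the operand width,
       so handle that case explicitly. */
    static unsigned tz32(unsigned x) {
      return x ? (unsigned)__builtin_ctz(x) : 32;
    }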
diff --git a/media/libaom/src/third_party/vector/LICENSE b/media/libaom/src/third_party/vector/LICENSE
new file mode 100644
index 000000000..afcb9f00a
--- /dev/null
+++ b/media/libaom/src/third_party/vector/LICENSE
@@ -0,0 +1,19 @@
+The MIT License (MIT)
+Copyright (c) 2016 Peter Goldsborough
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/media/libaom/src/third_party/vector/README.libaom b/media/libaom/src/third_party/vector/README.libaom
new file mode 100644
index 000000000..2bb8b2d5d
--- /dev/null
+++ b/media/libaom/src/third_party/vector/README.libaom
@@ -0,0 +1,14 @@
+Name: vector
+URL: https://github.com/goldsborough/vector
+Version: commit-id: 40efe82
+License: MIT
+License File: LICENSE
+
+Description:
+A feature-complete, generic and customizable resizable
+array implementation in pure C that supports almost
+the entire C++ std::vector API, including iterators.
+
+Local Modifications:
+Renamed some functions to fit in with the AOMedia
+naming convention.
diff --git a/media/libaom/src/third_party/vector/vector.c b/media/libaom/src/third_party/vector/vector.c
new file mode 100644
index 000000000..fe46246a1
--- /dev/null
+++ b/media/libaom/src/third_party/vector/vector.c
@@ -0,0 +1,543 @@
+/*
+The MIT License (MIT)
+Copyright (c) 2016 Peter Goldsborough
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#define __STDC_WANT_LIB_EXT1__ 1
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "third_party/vector/vector.h"
+
+int aom_vector_setup(Vector *vector, size_t capacity, size_t element_size) {
+ assert(vector != NULL);
+
+ if (vector == NULL) return VECTOR_ERROR;
+
+ vector->size = 0;
+ vector->capacity = MAX(VECTOR_MINIMUM_CAPACITY, capacity);
+ vector->element_size = element_size;
+ vector->data = malloc(vector->capacity * element_size);
+
+ return vector->data == NULL ? VECTOR_ERROR : VECTOR_SUCCESS;
+}
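Taken together with the insertion and lookup functions below, typical usage is: set up with an element size, push elements by address (the vector copies them), read back, destroy. A minimal sketch, assuming the VECTOR_GET_AS helper from vector.h:

    #include "third_party/vector/vector.h"

    static int demo(void) {
      Vector v;
      int x = 42;
      int y;
      if (aom_vector_setup(&v, 8, sizeof(int)) != VECTOR_SUCCESS)
        return VECTOR_ERROR;
      aom_vector_push_back(&v, &x);   /* copies the int, not the pointer */
      y = VECTOR_GET_AS(int, &v, 0);  /* y == 42 */
      aom_vector_destroy(&v);
      return y == 42 ? VECTOR_SUCCESS : VECTOR_ERROR;
    }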
+
+int aom_vector_copy(Vector *destination, Vector *source) {
+ assert(destination != NULL);
+ assert(source != NULL);
+ assert(aom_vector_is_initialized(source));
+ assert(!aom_vector_is_initialized(destination));
+
+ if (destination == NULL) return VECTOR_ERROR;
+ if (source == NULL) return VECTOR_ERROR;
+ if (aom_vector_is_initialized(destination)) return VECTOR_ERROR;
+ if (!aom_vector_is_initialized(source)) return VECTOR_ERROR;
+
+ /* Copy ALL the data */
+ destination->size = source->size;
+ destination->capacity = source->size * 2;
+ destination->element_size = source->element_size;
+
+ /* Note that we are not necessarily allocating the same capacity */
+ destination->data = malloc(destination->capacity * source->element_size);
+ if (destination->data == NULL) return VECTOR_ERROR;
+
+ memcpy(destination->data, source->data, aom_vector_byte_size(source));
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_copy_assign(Vector *destination, Vector *source) {
+ assert(destination != NULL);
+ assert(source != NULL);
+ assert(aom_vector_is_initialized(source));
+ assert(aom_vector_is_initialized(destination));
+
+ if (destination == NULL) return VECTOR_ERROR;
+ if (source == NULL) return VECTOR_ERROR;
+ if (!aom_vector_is_initialized(destination)) return VECTOR_ERROR;
+ if (!aom_vector_is_initialized(source)) return VECTOR_ERROR;
+
+ aom_vector_destroy(destination);
+
+ return aom_vector_copy(destination, source);
+}
+
+int aom_vector_move(Vector *destination, Vector *source) {
+ assert(destination != NULL);
+ assert(source != NULL);
+
+ if (destination == NULL) return VECTOR_ERROR;
+ if (source == NULL) return VECTOR_ERROR;
+
+ *destination = *source;
+ source->data = NULL;
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_move_assign(Vector *destination, Vector *source) {
+ aom_vector_swap(destination, source);
+ return aom_vector_destroy(source);
+}
+
+int aom_vector_swap(Vector *destination, Vector *source) {
+ void *temp;
+
+ assert(destination != NULL);
+ assert(source != NULL);
+ assert(aom_vector_is_initialized(source));
+ assert(aom_vector_is_initialized(destination));
+
+ if (destination == NULL) return VECTOR_ERROR;
+ if (source == NULL) return VECTOR_ERROR;
+ if (!aom_vector_is_initialized(destination)) return VECTOR_ERROR;
+ if (!aom_vector_is_initialized(source)) return VECTOR_ERROR;
+
+ _vector_swap(&destination->size, &source->size);
+ _vector_swap(&destination->capacity, &source->capacity);
+ _vector_swap(&destination->element_size, &source->element_size);
+
+ temp = destination->data;
+ destination->data = source->data;
+ source->data = temp;
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_destroy(Vector *vector) {
+ assert(vector != NULL);
+
+ if (vector == NULL) return VECTOR_ERROR;
+
+ free(vector->data);
+ vector->data = NULL;
+
+ return VECTOR_SUCCESS;
+}
+
+/* Insertion */
+int aom_vector_push_back(Vector *vector, void *element) {
+ assert(vector != NULL);
+ assert(element != NULL);
+
+ if (_vector_should_grow(vector)) {
+ if (_vector_adjust_capacity(vector) == VECTOR_ERROR) {
+ return VECTOR_ERROR;
+ }
+ }
+
+ _vector_assign(vector, vector->size, element);
+
+ ++vector->size;
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_push_front(Vector *vector, void *element) {
+ return aom_vector_insert(vector, 0, element);
+}
+
+int aom_vector_insert(Vector *vector, size_t index, void *element) {
+ void *offset;
+
+ assert(vector != NULL);
+ assert(element != NULL);
+ assert(index <= vector->size);
+
+ if (vector == NULL) return VECTOR_ERROR;
+ if (element == NULL) return VECTOR_ERROR;
+ if (vector->element_size == 0) return VECTOR_ERROR;
+ if (index > vector->size) return VECTOR_ERROR;
+
+ if (_vector_should_grow(vector)) {
+ if (_vector_adjust_capacity(vector) == VECTOR_ERROR) {
+ return VECTOR_ERROR;
+ }
+ }
+
+ /* Move other elements to the right */
+ if (_vector_move_right(vector, index) == VECTOR_ERROR) {
+ return VECTOR_ERROR;
+ }
+
+ /* Insert the element */
+ offset = _vector_offset(vector, index);
+ memcpy(offset, element, vector->element_size);
+ ++vector->size;
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_assign(Vector *vector, size_t index, void *element) {
+ assert(vector != NULL);
+ assert(element != NULL);
+ assert(index < vector->size);
+
+ if (vector == NULL) return VECTOR_ERROR;
+ if (element == NULL) return VECTOR_ERROR;
+ if (vector->element_size == 0) return VECTOR_ERROR;
+ if (index >= vector->size) return VECTOR_ERROR;
+
+ _vector_assign(vector, index, element);
+
+ return VECTOR_SUCCESS;
+}
+
+/* Deletion */
+int aom_vector_pop_back(Vector *vector) {
+ assert(vector != NULL);
+ assert(vector->size > 0);
+
+ if (vector == NULL) return VECTOR_ERROR;
+ if (vector->element_size == 0) return VECTOR_ERROR;
+
+ --vector->size;
+
+#ifndef VECTOR_NO_SHRINK
+ if (_vector_should_shrink(vector)) {
+ _vector_adjust_capacity(vector);
+ }
+#endif
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_pop_front(Vector *vector) { return aom_vector_erase(vector, 0); }
+
+int aom_vector_erase(Vector *vector, size_t index) {
+ assert(vector != NULL);
+ assert(index < vector->size);
+
+ if (vector == NULL) return VECTOR_ERROR;
+ if (vector->element_size == 0) return VECTOR_ERROR;
+ if (index >= vector->size) return VECTOR_ERROR;
+
+ /* Just overwrite */
+ _vector_move_left(vector, index);
+
+#ifndef VECTOR_NO_SHRINK
+ if (--vector->size == vector->capacity / 4) {
+ _vector_adjust_capacity(vector);
+ }
+#endif
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_clear(Vector *vector) { return aom_vector_resize(vector, 0); }
+
+/* Lookup */
+void *aom_vector_get(Vector *vector, size_t index) {
+ assert(vector != NULL);
+ assert(index < vector->size);
+
+ if (vector == NULL) return NULL;
+ if (vector->element_size == 0) return NULL;
+ if (index >= vector->size) return NULL;
+
+ return _vector_offset(vector, index);
+}
+
+const void *aom_vector_const_get(const Vector *vector, size_t index) {
+ assert(vector != NULL);
+ assert(index < vector->size);
+
+ if (vector == NULL) return NULL;
+ if (vector->element_size == 0) return NULL;
+ if (index >= vector->size) return NULL;
+
+ return _vector_const_offset(vector, index);
+}
+
+void *aom_vector_front(Vector *vector) { return aom_vector_get(vector, 0); }
+
+void *aom_vector_back(Vector *vector) {
+ return aom_vector_get(vector, vector->size - 1);
+}
+
+/* Information */
+
+bool aom_vector_is_initialized(const Vector *vector) {
+ return vector->data != NULL;
+}
+
+size_t aom_vector_byte_size(const Vector *vector) {
+ return vector->size * vector->element_size;
+}
+
+size_t aom_vector_free_space(const Vector *vector) {
+ return vector->capacity - vector->size;
+}
+
+bool aom_vector_is_empty(const Vector *vector) { return vector->size == 0; }
+
+/* Memory management */
+int aom_vector_resize(Vector *vector, size_t new_size) {
+ if (new_size <= vector->capacity * VECTOR_SHRINK_THRESHOLD) {
+ vector->size = new_size;
+ if (_vector_reallocate(vector, new_size * VECTOR_GROWTH_FACTOR) == -1) {
+ return VECTOR_ERROR;
+ }
+ } else if (new_size > vector->capacity) {
+ if (_vector_reallocate(vector, new_size * VECTOR_GROWTH_FACTOR) == -1) {
+ return VECTOR_ERROR;
+ }
+ }
+
+ vector->size = new_size;
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_reserve(Vector *vector, size_t minimum_capacity) {
+ if (minimum_capacity > vector->capacity) {
+ if (_vector_reallocate(vector, minimum_capacity) == VECTOR_ERROR) {
+ return VECTOR_ERROR;
+ }
+ }
+
+ return VECTOR_SUCCESS;
+}
+
+int aom_vector_shrink_to_fit(Vector *vector) {
+ return _vector_reallocate(vector, vector->size);
+}
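When the final element count is known in advance, a single reserve avoids the repeated grow-reallocate cycle inside push_back. A short sketch, assuming v is an initialized Vector of int and n is the expected count:

    aom_vector_reserve(&v, n);           /* one allocation up front */
    for (size_t i = 0; i < n; ++i) {
      int value = (int)i;
      aom_vector_push_back(&v, &value);  /* no regrow until size reaches n */
    }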
+
+/* Iterators */
+Iterator aom_vector_begin(Vector *vector) { return aom_vector_iterator(vector, 0); }
+
+Iterator aom_vector_end(Vector *vector) {
+ return aom_vector_iterator(vector, vector->size);
+}
+
+Iterator aom_vector_iterator(Vector *vector, size_t index) {
+ Iterator iterator = { NULL, 0 };
+
+ assert(vector != NULL);
+ assert(index <= vector->size);
+
+ if (vector == NULL) return iterator;
+ if (index > vector->size) return iterator;
+ if (vector->element_size == 0) return iterator;
+
+ iterator.pointer = _vector_offset(vector, index);
+ iterator.element_size = vector->element_size;
+
+ return iterator;
+}
+
+void *iterator_get(Iterator *iterator) { return iterator->pointer; }
+
+int iterator_erase(Vector *vector, Iterator *iterator) {
+ size_t index = iterator_index(vector, iterator);
+
+ if (aom_vector_erase(vector, index) == VECTOR_ERROR) {
+ return VECTOR_ERROR;
+ }
+
+ *iterator = aom_vector_iterator(vector, index);
+
+ return VECTOR_SUCCESS;
+}
+
+void iterator_increment(Iterator *iterator) {
+ assert(iterator != NULL);
+ // iterator->pointer += iterator->element_size;
+ iterator->pointer =
+ (unsigned char *)iterator->pointer + iterator->element_size;
+}
+
+void iterator_decrement(Iterator *iterator) {
+ assert(iterator != NULL);
+ // iterator->pointer -= iterator->element_size;
+ iterator->pointer =
+ (unsigned char *)iterator->pointer - iterator->element_size;
+}
+
+void *iterator_next(Iterator *iterator) {
+ void *current = iterator->pointer;
+ iterator_increment(iterator);
+
+ return current;
+}
+
+void *iterator_previous(Iterator *iterator) {
+ void *current = iterator->pointer;
+ iterator_decrement(iterator);
+
+ return current;
+}
+
+bool iterator_equals(Iterator *first, Iterator *second) {
+ assert(first->element_size == second->element_size);
+ return first->pointer == second->pointer;
+}
+
+bool iterator_is_before(Iterator *first, Iterator *second) {
+ assert(first->element_size == second->element_size);
+ return first->pointer < second->pointer;
+}
+
+bool iterator_is_after(Iterator *first, Iterator *second) {
+ assert(first->element_size == second->element_size);
+ return first->pointer > second->pointer;
+}
+
+size_t iterator_index(Vector *vector, Iterator *iterator) {
+ assert(vector != NULL);
+ assert(iterator != NULL);
+ // return (iterator->pointer - vector->data) / vector->element_size;
+ return ((unsigned char *)iterator->pointer - (unsigned char *)vector->data) /
+ vector->element_size;
+}
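The iterator functions compose into the usual begin/end loop. A sketch, assuming v is an initialized Vector of int and the ITERATOR_GET_AS helper from vector.h:

    Iterator it = aom_vector_begin(&v);
    Iterator end = aom_vector_end(&v);
    for (; !iterator_equals(&it, &end); iterator_increment(&it)) {
      int value = ITERATOR_GET_AS(int, &it);
      /* ... use value ... */
    }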
+
+/***** PRIVATE *****/
+
+bool _vector_should_grow(Vector *vector) {
+ assert(vector->size <= vector->capacity);
+ return vector->size == vector->capacity;
+}
+
+bool _vector_should_shrink(Vector *vector) {
+ assert(vector->size <= vector->capacity);
+ return vector->size == vector->capacity * VECTOR_SHRINK_THRESHOLD;
+}
+
+size_t _vector_free_bytes(const Vector *vector) {
+ return aom_vector_free_space(vector) * vector->element_size;
+}
+
+void *_vector_offset(Vector *vector, size_t index) {
+ // return vector->data + (index * vector->element_size);
+ return (unsigned char *)vector->data + (index * vector->element_size);
+}
+
+const void *_vector_const_offset(const Vector *vector, size_t index) {
+ // return vector->data + (index * vector->element_size);
+ return (unsigned char *)vector->data + (index * vector->element_size);
+}
+
+void _vector_assign(Vector *vector, size_t index, void *element) {
+ /* Insert the element */
+ void *offset = _vector_offset(vector, index);
+ memcpy(offset, element, vector->element_size);
+}
+
+int _vector_move_right(Vector *vector, size_t index) {
+ assert(vector->size < vector->capacity);
+
+  /* The location to start moving from. */
+ void *offset = _vector_offset(vector, index);
+
+ /* How many to move to the right. */
+ size_t elements_in_bytes = (vector->size - index) * vector->element_size;
+
+#ifdef __STDC_LIB_EXT1__
+ size_t right_capacity_in_bytes =
+ (vector->capacity - (index + 1)) * vector->element_size;
+
+ /* clang-format off */
+ int return_code = memmove_s(
+      (unsigned char *)offset + vector->element_size,
+ right_capacity_in_bytes,
+ offset,
+ elements_in_bytes);
+
+ /* clang-format on */
+
+ return return_code == 0 ? VECTOR_SUCCESS : VECTOR_ERROR;
+
+#else
+ // memmove(offset + vector->element_size, offset, elements_in_bytes);
+ memmove((unsigned char *)offset + vector->element_size, offset,
+ elements_in_bytes);
+ return VECTOR_SUCCESS;
+#endif
+}
+
+void _vector_move_left(Vector *vector, size_t index) {
+ size_t right_elements_in_bytes;
+ void *offset;
+
+ /* The offset into the memory */
+ offset = _vector_offset(vector, index);
+
+ /* How many to move to the left */
+ right_elements_in_bytes = (vector->size - index - 1) * vector->element_size;
+
+ // memmove(offset, offset + vector->element_size, right_elements_in_bytes);
+ memmove(offset, (unsigned char *)offset + vector->element_size,
+ right_elements_in_bytes);
+}
+
+int _vector_adjust_capacity(Vector *vector) {
+ return _vector_reallocate(vector,
+ MAX(1, vector->size * VECTOR_GROWTH_FACTOR));
+}
+
+int _vector_reallocate(Vector *vector, size_t new_capacity) {
+ size_t new_capacity_in_bytes;
+ void *old;
+ assert(vector != NULL);
+
+ if (new_capacity < VECTOR_MINIMUM_CAPACITY) {
+ if (vector->capacity > VECTOR_MINIMUM_CAPACITY) {
+ new_capacity = VECTOR_MINIMUM_CAPACITY;
+ } else {
+ /* NO-OP */
+ return VECTOR_SUCCESS;
+ }
+ }
+
+ new_capacity_in_bytes = new_capacity * vector->element_size;
+ old = vector->data;
+
+ if ((vector->data = malloc(new_capacity_in_bytes)) == NULL) {
+ return VECTOR_ERROR;
+ }
+
+#ifdef __STDC_LIB_EXT1__
+ /* clang-format off */
+ if (memcpy_s(vector->data,
+ new_capacity_in_bytes,
+ old,
+ aom_vector_byte_size(vector)) != 0) {
+ return VECTOR_ERROR;
+ }
+/* clang-format on */
+#else
+ memcpy(vector->data, old, aom_vector_byte_size(vector));
+#endif
+
+ vector->capacity = new_capacity;
+
+ free(old);
+
+ return VECTOR_SUCCESS;
+}
+
+void _vector_swap(size_t *first, size_t *second) {
+ size_t temp = *first;
+ *first = *second;
+ *second = temp;
+}
diff --git a/media/libaom/src/third_party/vector/vector.h b/media/libaom/src/third_party/vector/vector.h
new file mode 100644
index 000000000..02743f5f1
--- /dev/null
+++ b/media/libaom/src/third_party/vector/vector.h
@@ -0,0 +1,159 @@
+/*
+The MIT License (MIT)
+Copyright (c) 2016 Peter Goldsborough
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+#ifndef VECTOR_H
+#define VECTOR_H
+
+#include <stdbool.h>
+#include <stddef.h>
+
+/***** DEFINITIONS *****/
+
+#define VECTOR_MINIMUM_CAPACITY 2
+#define VECTOR_GROWTH_FACTOR 2
+#define VECTOR_SHRINK_THRESHOLD (1.0 / 4.0) /* integer 1 / 4 would always be 0 */
+
+#define VECTOR_ERROR -1
+#define VECTOR_SUCCESS 0
+
+#define VECTOR_UNINITIALIZED NULL
+#define VECTOR_INITIALIZER \
+ { 0, 0, 0, VECTOR_UNINITIALIZED }
+
+/***** STRUCTURES *****/
+
+typedef struct Vector {
+ size_t size;
+ size_t capacity;
+ size_t element_size;
+
+ void *data;
+} Vector;
+
+typedef struct Iterator {
+ void *pointer;
+ size_t element_size;
+} Iterator;
+
+/***** METHODS *****/
+
+/* Constructor */
+int aom_vector_setup(Vector *vector, size_t capacity, size_t element_size);
+
+/* Copy Constructor */
+int aom_vector_copy(Vector *destination, Vector *source);
+
+/* Copy Assignment */
+int aom_vector_copy_assign(Vector *destination, Vector *source);
+
+/* Move Constructor */
+int aom_vector_move(Vector *destination, Vector *source);
+
+/* Move Assignment */
+int aom_vector_move_assign(Vector *destination, Vector *source);
+
+int aom_vector_swap(Vector *destination, Vector *source);
+
+/* Destructor */
+int aom_vector_destroy(Vector *vector);
+
+/* Insertion */
+int aom_vector_push_back(Vector *vector, void *element);
+int aom_vector_push_front(Vector *vector, void *element);
+int aom_vector_insert(Vector *vector, size_t index, void *element);
+int aom_vector_assign(Vector *vector, size_t index, void *element);
+
+/* Deletion */
+int aom_vector_pop_back(Vector *vector);
+int aom_vector_pop_front(Vector *vector);
+int aom_vector_erase(Vector *vector, size_t index);
+int aom_vector_clear(Vector *vector);
+
+/* Lookup */
+void *aom_vector_get(Vector *vector, size_t index);
+const void *aom_vector_const_get(const Vector *vector, size_t index);
+void *aom_vector_front(Vector *vector);
+void *aom_vector_back(Vector *vector);
+#define VECTOR_GET_AS(type, aom_vector_pointer, index) \
+ *((type *)aom_vector_get((aom_vector_pointer), (index)))
+
+/* Information */
+bool aom_vector_is_initialized(const Vector *vector);
+size_t aom_vector_byte_size(const Vector *vector);
+size_t aom_vector_free_space(const Vector *vector);
+bool aom_vector_is_empty(const Vector *vector);
+
+/* Memory management */
+int aom_vector_resize(Vector *vector, size_t new_size);
+int aom_vector_reserve(Vector *vector, size_t minimum_capacity);
+int aom_vector_shrink_to_fit(Vector *vector);
+
+/* Iterators */
+Iterator aom_vector_begin(Vector *vector);
+Iterator aom_vector_end(Vector *vector);
+Iterator aom_vector_iterator(Vector *vector, size_t index);
+
+void *iterator_get(Iterator *iterator);
+#define ITERATOR_GET_AS(type, iterator) *((type *)iterator_get((iterator)))
+
+int iterator_erase(Vector *vector, Iterator *iterator);
+
+void iterator_increment(Iterator *iterator);
+void iterator_decrement(Iterator *iterator);
+
+void *iterator_next(Iterator *iterator);
+void *iterator_previous(Iterator *iterator);
+
+bool iterator_equals(Iterator *first, Iterator *second);
+bool iterator_is_before(Iterator *first, Iterator *second);
+bool iterator_is_after(Iterator *first, Iterator *second);
+
+size_t iterator_index(Vector *vector, Iterator *iterator);
+
+#define VECTOR_FOR_EACH(aom_vector_pointer, iterator_name) \
+ for (Iterator(iterator_name) = aom_vector_begin((aom_vector_pointer)), \
+ end = aom_vector_end((aom_vector_pointer)); \
+ !iterator_equals(&(iterator_name), &end); \
+ iterator_increment(&(iterator_name)))
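VECTOR_FOR_EACH expands to exactly that begin/end loop header, declaring both the named iterator and a hidden `end` iterator in the for scope; usage looks like this (assuming a Vector of int):

    VECTOR_FOR_EACH(&v, it) {
      int value = ITERATOR_GET_AS(int, &it);
      /* ... use value ... */
    }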
+
+/***** PRIVATE *****/
+
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+bool _vector_should_grow(Vector *vector);
+bool _vector_should_shrink(Vector *vector);
+
+size_t _vector_free_bytes(const Vector *vector);
+void *_vector_offset(Vector *vector, size_t index);
+const void *_vector_const_offset(const Vector *vector, size_t index);
+
+void _vector_assign(Vector *vector, size_t index, void *element);
+
+int _vector_move_right(Vector *vector, size_t index);
+void _vector_move_left(Vector *vector, size_t index);
+
+int _vector_adjust_capacity(Vector *vector);
+int _vector_reallocate(Vector *vector, size_t new_capacity);
+
+void _vector_swap(size_t *first, size_t *second);
+
+#endif /* VECTOR_H */
diff --git a/media/libaom/src/third_party/x86inc/LICENSE b/media/libaom/src/third_party/x86inc/LICENSE
new file mode 100644
index 000000000..7d07645a1
--- /dev/null
+++ b/media/libaom/src/third_party/x86inc/LICENSE
@@ -0,0 +1,18 @@
+Copyright (C) 2005-2012 x264 project
+
+Authors: Loren Merritt <lorenm@u.washington.edu>
+ Anton Mitrofanov <BugMaster@narod.ru>
+ Jason Garrett-Glaser <darkshikari@gmail.com>
+ Henrik Gramner <hengar-6@student.ltu.se>
+
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/media/libaom/src/third_party/x86inc/README.libaom b/media/libaom/src/third_party/x86inc/README.libaom
new file mode 100644
index 000000000..07c4dad20
--- /dev/null
+++ b/media/libaom/src/third_party/x86inc/README.libaom
@@ -0,0 +1,20 @@
+URL: https://git.videolan.org/git/x264.git
+Version: d23d18655249944c1ca894b451e2c82c7a584c62
+License: ISC
+License File: LICENSE
+
+Description:
+x264/libav's framework for x86 assembly. Contains a variety of macros and
+defines that let the same assembly work across platforms automatically.
+
+Local Modifications:
+Get configuration from aom_config.asm.
+Prefix functions with aom by default.
+Manage name mangling (prefixing with '_') manually because 'PREFIX' does not
+ exist in libaom.
+Expand PIC default to macho64 and respect CONFIG_PIC from libaom
+Set 'private_extern' visibility for macho targets.
+Copy PIC 'GLOBAL' macros from x86_abi_support.asm
+Use .text instead of .rodata on macho to avoid broken tables in PIC mode.
+Use .text with no alignment for aout
+Only use 'hidden' visibility with Chromium
diff --git a/media/libaom/src/third_party/x86inc/x86inc.asm b/media/libaom/src/third_party/x86inc/x86inc.asm
new file mode 100644
index 000000000..adaf2d99e
--- /dev/null
+++ b/media/libaom/src/third_party/x86inc/x86inc.asm
@@ -0,0 +1,1649 @@
+;*****************************************************************************
+;* x86inc.asm: x264asm abstraction layer
+;*****************************************************************************
+;* Copyright (C) 2005-2016 x264 project
+;*
+;* Authors: Loren Merritt <lorenm@u.washington.edu>
+;* Anton Mitrofanov <BugMaster@narod.ru>
+;* Fiona Glaser <fiona@x264.com>
+;* Henrik Gramner <henrik@gramner.com>
+;*
+;* Permission to use, copy, modify, and/or distribute this software for any
+;* purpose with or without fee is hereby granted, provided that the above
+;* copyright notice and this permission notice appear in all copies.
+;*
+;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+;*****************************************************************************
+
+; This is a header file for the x264ASM assembly language, which uses
+; NASM/YASM syntax combined with a large number of macros to provide easy
+; abstraction between different calling conventions (x86_32, win64, linux64).
+; It also has various other useful features to simplify writing the kind of
+; DSP functions that are most often used in x264.
+
+; Unlike the rest of x264, this file is available under an ISC license, as it
+; has significant usefulness outside of x264 and we want it to be available
+; to the largest audience possible. Of course, if you modify it for your own
+; purposes to add a new feature, we strongly encourage contributing a patch
+; as this feature might be useful for others as well. Send patches or ideas
+; to x264-devel@videolan.org .
+
+%include "config/aom_config.asm"
+
+%ifndef private_prefix
+ %define private_prefix aom
+%endif
+
+%ifndef public_prefix
+ %define public_prefix private_prefix
+%endif
+
+%ifndef STACK_ALIGNMENT
+ %if ARCH_X86_64
+ %define STACK_ALIGNMENT 16
+ %else
+ %define STACK_ALIGNMENT 4
+ %endif
+%endif
+
+%define WIN64 0
+%define UNIX64 0
+%if ARCH_X86_64
+ %ifidn __OUTPUT_FORMAT__,win32
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,win64
+ %define WIN64 1
+ %elifidn __OUTPUT_FORMAT__,x64
+ %define WIN64 1
+ %else
+ %define UNIX64 1
+ %endif
+%endif
+
+%define FORMAT_ELF 0
+%ifidn __OUTPUT_FORMAT__,elf
+ %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf32
+ %define FORMAT_ELF 1
+%elifidn __OUTPUT_FORMAT__,elf64
+ %define FORMAT_ELF 1
+%endif
+
+%define FORMAT_MACHO 0
+%ifidn __OUTPUT_FORMAT__,macho32
+ %define FORMAT_MACHO 1
+%elifidn __OUTPUT_FORMAT__,macho64
+ %define FORMAT_MACHO 1
+%endif
+
+; Set PREFIX for libaom builds.
+%if FORMAT_ELF
+ %undef PREFIX
+%elif WIN64
+ %undef PREFIX
+%else
+ %define PREFIX
+%endif
+
+%ifdef PREFIX
+ %define mangle(x) _ %+ x
+%else
+ %define mangle(x) x
+%endif
+
+; In some instances macho32 tables get misaligned when using .rodata.
+; When looking at the disassembly it appears that the offset is either
+; correct or consistently off by 90. Placing them in the .text section
+; works around the issue. It appears to be specific to the way libaom
+; handles the tables.
+%macro SECTION_RODATA 0-1 16
+ %ifidn __OUTPUT_FORMAT__,macho32
+ SECTION .text align=%1
+ fakegot:
+ %elifidn __OUTPUT_FORMAT__,aout
+ SECTION .text
+ %else
+ SECTION .rodata align=%1
+ %endif
+%endmacro
+
+; PIC macros are copied from aom_ports/x86_abi_support.asm. The "define PIC"
+; from original code is added in for 64bit.
+%ifidn __OUTPUT_FORMAT__,elf32
+%define ABI_IS_32BIT 1
+%elifidn __OUTPUT_FORMAT__,macho32
+%define ABI_IS_32BIT 1
+%elifidn __OUTPUT_FORMAT__,win32
+%define ABI_IS_32BIT 1
+%elifidn __OUTPUT_FORMAT__,aout
+%define ABI_IS_32BIT 1
+%else
+%define ABI_IS_32BIT 0
+%endif
+
+%if ABI_IS_32BIT
+ %if CONFIG_PIC=1
+ %ifidn __OUTPUT_FORMAT__,elf32
+ %define GET_GOT_DEFINED 1
+ %define WRT_PLT wrt ..plt
+ %macro GET_GOT 1
+ extern _GLOBAL_OFFSET_TABLE_
+ push %1
+ call %%get_got
+ %%sub_offset:
+ jmp %%exitGG
+ %%get_got:
+ mov %1, [esp]
+ add %1, _GLOBAL_OFFSET_TABLE_ + $$ - %%sub_offset wrt ..gotpc
+ ret
+ %%exitGG:
+ %undef GLOBAL
+ %define GLOBAL(x) x + %1 wrt ..gotoff
+ %undef RESTORE_GOT
+ %define RESTORE_GOT pop %1
+ %endmacro
+ %elifidn __OUTPUT_FORMAT__,macho32
+ %define GET_GOT_DEFINED 1
+ %macro GET_GOT 1
+ push %1
+ call %%get_got
+ %%get_got:
+ pop %1
+ %undef GLOBAL
+ %define GLOBAL(x) x + %1 - %%get_got
+ %undef RESTORE_GOT
+ %define RESTORE_GOT pop %1
+ %endmacro
+ %else
+ %define GET_GOT_DEFINED 0
+ %endif
+ %endif
+
+ %if ARCH_X86_64 == 0
+ %undef PIC
+ %endif
+
+%else
+ %macro GET_GOT 1
+ %endmacro
+ %define GLOBAL(x) rel x
+ %define WRT_PLT wrt ..plt
+
+ %if WIN64
+ %define PIC
+ %elifidn __OUTPUT_FORMAT__,macho64
+ %define PIC
+ %elif CONFIG_PIC
+ %define PIC
+ %endif
+%endif
+
+%ifnmacro GET_GOT
+ %macro GET_GOT 1
+ %endmacro
+ %define GLOBAL(x) x
+%endif
+%ifndef RESTORE_GOT
+ %define RESTORE_GOT
+%endif
+%ifndef WRT_PLT
+ %define WRT_PLT
+%endif
+
+%ifdef PIC
+ default rel
+%endif
+
+%ifndef GET_GOT_DEFINED
+ %define GET_GOT_DEFINED 0
+%endif
+; Done with PIC macros
+
+%ifdef __NASM_VER__
+ %use smartalign
+%endif
+
+; Macros to eliminate most code duplication between x86_32 and x86_64:
+; Currently this works only for leaf functions which load all their arguments
+; into registers at the start, and make no other use of the stack. Luckily that
+; covers most of x264's asm.
+
+; PROLOGUE:
+; %1 = number of arguments. loads them from stack if needed.
+; %2 = number of registers used. pushes callee-saved regs if needed.
+; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed.
+; %4 = (optional) stack size to be allocated. The stack will be aligned before
+; allocating the specified stack size. If the required stack alignment is
+; larger than the known stack alignment the stack will be manually aligned
+; and an extra register will be allocated to hold the original stack
+; pointer (to not invalidate r0m etc.). To prevent the use of an extra
+; register as stack pointer, request a negative stack size.
+; %4+/%5+ = list of names to define to registers
+; PROLOGUE can also be invoked by adding the same options to cglobal
+
+; e.g.
+; cglobal foo, 2,3,7,0x40, dst, src, tmp
+; declares a function (foo) that automatically loads two arguments (dst and
+; src) into registers, uses one additional register (tmp) plus 7 vector
+; registers (m0-m6) and allocates 0x40 bytes of stack space.
+
+; TODO Some functions can use some args directly from the stack. If they're the
+; last args then you can just not declare them, but if they're in the middle
+; we need a more flexible macro.
+
+; RET:
+; Pops anything that was pushed by PROLOGUE, and returns.
+
+; REP_RET:
+; Use this instead of RET if it's a branch target.
+
+; registers:
+; rN and rNq are the native-size register holding function argument N
+; rNd, rNw, rNb are dword, word, and byte size
+; rNh is the high 8 bits of the word size
+; rNm is the original location of arg N (a register or on the stack), dword
+; rNmp is native size
+
+%macro DECLARE_REG 2-3
+ %define r%1q %2
+ %define r%1d %2d
+ %define r%1w %2w
+ %define r%1b %2b
+ %define r%1h %2h
+ %define %2q %2
+ %if %0 == 2
+ %define r%1m %2d
+ %define r%1mp %2
+ %elif ARCH_X86_64 ; memory
+ %define r%1m [rstk + stack_offset + %3]
+ %define r%1mp qword r %+ %1 %+ m
+ %else
+ %define r%1m [rstk + stack_offset + %3]
+ %define r%1mp dword r %+ %1 %+ m
+ %endif
+ %define r%1 %2
+%endmacro
+
+%macro DECLARE_REG_SIZE 3
+ %define r%1q r%1
+ %define e%1q r%1
+ %define r%1d e%1
+ %define e%1d e%1
+ %define r%1w %1
+ %define e%1w %1
+ %define r%1h %3
+ %define e%1h %3
+ %define r%1b %2
+ %define e%1b %2
+ %if ARCH_X86_64 == 0
+ %define r%1 e%1
+ %endif
+%endmacro
+
+DECLARE_REG_SIZE ax, al, ah
+DECLARE_REG_SIZE bx, bl, bh
+DECLARE_REG_SIZE cx, cl, ch
+DECLARE_REG_SIZE dx, dl, dh
+DECLARE_REG_SIZE si, sil, null
+DECLARE_REG_SIZE di, dil, null
+DECLARE_REG_SIZE bp, bpl, null
+
+; t# defines for when per-arch register allocation is more complex than just function arguments
+
+%macro DECLARE_REG_TMP 1-*
+ %assign %%i 0
+ %rep %0
+ CAT_XDEFINE t, %%i, r%1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro DECLARE_REG_TMP_SIZE 0-*
+ %rep %0
+ %define t%1q t%1 %+ q
+ %define t%1d t%1 %+ d
+ %define t%1w t%1 %+ w
+ %define t%1h t%1 %+ h
+ %define t%1b t%1 %+ b
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14
+
+%if ARCH_X86_64
+ %define gprsize 8
+%else
+ %define gprsize 4
+%endif
+
+%macro PUSH 1
+ push %1
+ %ifidn rstk, rsp
+ %assign stack_offset stack_offset+gprsize
+ %endif
+%endmacro
+
+%macro POP 1
+ pop %1
+ %ifidn rstk, rsp
+ %assign stack_offset stack_offset-gprsize
+ %endif
+%endmacro
+
+%macro PUSH_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ PUSH r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro POP_IF_USED 1-*
+ %rep %0
+ %if %1 < regs_used
+ pop r%1
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro LOAD_IF_USED 1-*
+ %rep %0
+ %if %1 < num_args
+ mov r%1, r %+ %1 %+ mp
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro SUB 2
+ sub %1, %2
+ %ifidn %1, rstk
+ %assign stack_offset stack_offset+(%2)
+ %endif
+%endmacro
+
+%macro ADD 2
+ add %1, %2
+ %ifidn %1, rstk
+ %assign stack_offset stack_offset-(%2)
+ %endif
+%endmacro
+
+%macro movifnidn 2
+ %ifnidn %1, %2
+ mov %1, %2
+ %endif
+%endmacro
+
+%macro movsxdifnidn 2
+ %ifnidn %1, %2
+ movsxd %1, %2
+ %endif
+%endmacro
+
+%macro ASSERT 1
+ %if (%1) == 0
+ %error assertion ``%1'' failed
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS 0-*
+ %ifdef n_arg_names
+ %assign %%i 0
+ %rep n_arg_names
+ CAT_UNDEF arg_name %+ %%i, q
+ CAT_UNDEF arg_name %+ %%i, d
+ CAT_UNDEF arg_name %+ %%i, w
+ CAT_UNDEF arg_name %+ %%i, h
+ CAT_UNDEF arg_name %+ %%i, b
+ CAT_UNDEF arg_name %+ %%i, m
+ CAT_UNDEF arg_name %+ %%i, mp
+ CAT_UNDEF arg_name, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+
+ %xdefine %%stack_offset stack_offset
+ %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine
+ %assign %%i 0
+ %rep %0
+ %xdefine %1q r %+ %%i %+ q
+ %xdefine %1d r %+ %%i %+ d
+ %xdefine %1w r %+ %%i %+ w
+ %xdefine %1h r %+ %%i %+ h
+ %xdefine %1b r %+ %%i %+ b
+ %xdefine %1m r %+ %%i %+ m
+ %xdefine %1mp r %+ %%i %+ mp
+ CAT_XDEFINE arg_name, %%i, %1
+ %assign %%i %%i+1
+ %rotate 1
+ %endrep
+ %xdefine stack_offset %%stack_offset
+ %assign n_arg_names %0
+%endmacro
+
+%define required_stack_alignment ((mmsize + 15) & ~15)
+
+%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only)
+ %ifnum %1
+ %if %1 != 0
+ %assign %%pad 0
+ %assign stack_size %1
+ %if stack_size < 0
+ %assign stack_size -stack_size
+ %endif
+ %if WIN64
+ %assign %%pad %%pad + 32 ; shadow space
+ %if mmsize != 8
+ %assign xmm_regs_used %2
+ %if xmm_regs_used > 8
+ %assign %%pad %%pad + (xmm_regs_used-8)*16 ; callee-saved xmm registers
+ %endif
+ %endif
+ %endif
+ %if required_stack_alignment <= STACK_ALIGNMENT
+ ; maintain the current stack alignment
+ %assign stack_size_padded stack_size + %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+ SUB rsp, stack_size_padded
+ %else
+ %assign %%reg_num (regs_used - 1)
+ %xdefine rstk r %+ %%reg_num
+ ; align stack, and save original stack location directly above
+ ; it, i.e. in [rsp+stack_size_padded], so we can restore the
+ ; stack in a single instruction (i.e. mov rsp, rstk or mov
+ ; rsp, [rsp+stack_size_padded])
+ %if %1 < 0 ; need to store rsp on stack
+ %xdefine rstkm [rsp + stack_size + %%pad]
+ %assign %%pad %%pad + gprsize
+ %else ; can keep rsp in rstk during whole function
+ %xdefine rstkm rstk
+ %endif
+ %assign stack_size_padded stack_size + ((%%pad + required_stack_alignment-1) & ~(required_stack_alignment-1))
+ mov rstk, rsp
+ and rsp, ~(required_stack_alignment-1)
+ sub rsp, stack_size_padded
+ movifnidn rstkm, rstk
+ %endif
+ WIN64_PUSH_XMM
+ %endif
+ %endif
+%endmacro
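The padding expression above rounds the combined frame up so that rsp lands back on a STACK_ALIGNMENT boundary, assuming it was aligned at the call. The same arithmetic in C (illustrative helpers only; `align` must be a power of two):

    /* Extra bytes so that (stack_offset + gprsize + pad + extra) is a
       multiple of `align`; mirrors (-%%pad-stack_offset-gprsize) & (align-1). */
    static size_t align_pad(size_t stack_offset, size_t gprsize, size_t pad,
                            size_t align) {
      return (0 - pad - stack_offset - gprsize) & (align - 1);
    }

    /* Manual-alignment path: round the stack pointer itself down. */
    static size_t align_down(size_t sp, size_t align) {
      return sp & ~(align - 1);
    }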
+
+%macro SETUP_STACK_POINTER 1
+ %ifnum %1
+ %if %1 != 0 && required_stack_alignment > STACK_ALIGNMENT
+ %if %1 > 0
+ %assign regs_used (regs_used + 1)
+ %endif
+ %if ARCH_X86_64 && regs_used < 5 + UNIX64 * 3
+ ; Ensure that we don't clobber any registers containing arguments
+ %assign regs_used 5 + UNIX64 * 3
+ %endif
+ %endif
+ %endif
+%endmacro
+
+%macro DEFINE_ARGS_INTERNAL 3+
+ %ifnum %2
+ DEFINE_ARGS %3
+ %elif %1 == 4
+ DEFINE_ARGS %2
+ %elif %1 > 4
+ DEFINE_ARGS %2, %3
+ %endif
+%endmacro
+
+%if WIN64 ; Windows x64 ;=================================================
+
+DECLARE_REG 0, rcx
+DECLARE_REG 1, rdx
+DECLARE_REG 2, R8
+DECLARE_REG 3, R9
+DECLARE_REG 4, R10, 40
+DECLARE_REG 5, R11, 48
+DECLARE_REG 6, rax, 56
+DECLARE_REG 7, rdi, 64
+DECLARE_REG 8, rsi, 72
+DECLARE_REG 9, rbx, 80
+DECLARE_REG 10, rbp, 88
+DECLARE_REG 11, R12, 96
+DECLARE_REG 12, R13, 104
+DECLARE_REG 13, R14, 112
+DECLARE_REG 14, R15, 120
+
+%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ SETUP_STACK_POINTER %4
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14
+ ALLOC_STACK %4, %3
+ %if mmsize != 8 && stack_size == 0
+ WIN64_SPILL_XMM %3
+ %endif
+ LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
+ DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%macro WIN64_PUSH_XMM 0
+ ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated.
+ %if xmm_regs_used > 6
+ movaps [rstk + stack_offset + 8], xmm6
+ %endif
+ %if xmm_regs_used > 7
+ movaps [rstk + stack_offset + 24], xmm7
+ %endif
+ %if xmm_regs_used > 8
+ %assign %%i 8
+ %rep xmm_regs_used-8
+ movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+%endmacro
+
+%macro WIN64_SPILL_XMM 1
+ %assign xmm_regs_used %1
+ ASSERT xmm_regs_used <= 16
+ %if xmm_regs_used > 8
+ ; Allocate stack space for callee-saved xmm registers plus shadow space and align the stack.
+ %assign %%pad (xmm_regs_used-8)*16 + 32
+ %assign stack_size_padded %%pad + ((-%%pad-stack_offset-gprsize) & (STACK_ALIGNMENT-1))
+ SUB rsp, stack_size_padded
+ %endif
+ WIN64_PUSH_XMM
+%endmacro
+
+%macro WIN64_RESTORE_XMM_INTERNAL 1
+ %assign %%pad_size 0
+ %if xmm_regs_used > 8
+ %assign %%i xmm_regs_used
+ %rep xmm_regs_used-8
+ %assign %%i %%i-1
+ movaps xmm %+ %%i, [%1 + (%%i-8)*16 + stack_size + 32]
+ %endrep
+ %endif
+ %if stack_size_padded > 0
+ %if stack_size > 0 && required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add %1, stack_size_padded
+ %assign %%pad_size stack_size_padded
+ %endif
+ %endif
+ %if xmm_regs_used > 7
+ movaps xmm7, [%1 + stack_offset - %%pad_size + 24]
+ %endif
+ %if xmm_regs_used > 6
+ movaps xmm6, [%1 + stack_offset - %%pad_size + 8]
+ %endif
+%endmacro
+
+%macro WIN64_RESTORE_XMM 1
+ WIN64_RESTORE_XMM_INTERNAL %1
+ %assign stack_offset (stack_offset-stack_size_padded)
+ %assign xmm_regs_used 0
+%endmacro
+
+%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0
+
+%macro RET 0
+ WIN64_RESTORE_XMM_INTERNAL rsp
+ POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7
+ %if mmsize == 32
+ vzeroupper
+ %endif
+ AUTO_REP_RET
+%endmacro
+
+%elif ARCH_X86_64 ; *nix x64 ;=============================================
+
+DECLARE_REG 0, rdi
+DECLARE_REG 1, rsi
+DECLARE_REG 2, rdx
+DECLARE_REG 3, rcx
+DECLARE_REG 4, R8
+DECLARE_REG 5, R9
+DECLARE_REG 6, rax, 8
+DECLARE_REG 7, R10, 16
+DECLARE_REG 8, R11, 24
+DECLARE_REG 9, rbx, 32
+DECLARE_REG 10, rbp, 40
+DECLARE_REG 11, R12, 48
+DECLARE_REG 12, R13, 56
+DECLARE_REG 13, R14, 64
+DECLARE_REG 14, R15, 72
+
+%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ SETUP_STACK_POINTER %4
+ ASSERT regs_used <= 15
+ PUSH_IF_USED 9, 10, 11, 12, 13, 14
+ ALLOC_STACK %4
+ LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14
+ DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0
+
+%macro RET 0
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
+ POP_IF_USED 14, 13, 12, 11, 10, 9
+ %if mmsize == 32
+ vzeroupper
+ %endif
+ AUTO_REP_RET
+%endmacro
+
+%else ; X86_32 ;==============================================================
+
+DECLARE_REG 0, eax, 4
+DECLARE_REG 1, ecx, 8
+DECLARE_REG 2, edx, 12
+DECLARE_REG 3, ebx, 16
+DECLARE_REG 4, esi, 20
+DECLARE_REG 5, edi, 24
+DECLARE_REG 6, ebp, 28
+%define rsp esp
+
+%macro DECLARE_ARG 1-*
+ %rep %0
+ %define r%1m [rstk + stack_offset + 4*%1 + 4]
+ %define r%1mp dword r%1m
+ %rotate 1
+ %endrep
+%endmacro
+
+DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14
+
+%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names...
+ %assign num_args %1
+ %assign regs_used %2
+ ASSERT regs_used >= num_args
+ %if num_args > 7
+ %assign num_args 7
+ %endif
+ %if regs_used > 7
+ %assign regs_used 7
+ %endif
+ SETUP_STACK_POINTER %4
+ ASSERT regs_used <= 7
+ PUSH_IF_USED 3, 4, 5, 6
+ ALLOC_STACK %4
+ LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6
+ DEFINE_ARGS_INTERNAL %0, %4, %5
+%endmacro
+
+%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0
+
+%macro RET 0
+ %if stack_size_padded > 0
+ %if required_stack_alignment > STACK_ALIGNMENT
+ mov rsp, rstkm
+ %else
+ add rsp, stack_size_padded
+ %endif
+ %endif
+ POP_IF_USED 6, 5, 4, 3
+ %if mmsize == 32
+ vzeroupper
+ %endif
+ AUTO_REP_RET
+%endmacro
+
+%endif ;======================================================================
+
+%if WIN64 == 0
+ %macro WIN64_SPILL_XMM 1
+ %endmacro
+ %macro WIN64_RESTORE_XMM 1
+ %endmacro
+ %macro WIN64_PUSH_XMM 0
+ %endmacro
+%endif
+
+; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either
+; a branch or a branch target. So switch to a 2-byte form of ret in that case.
+; We can automatically detect "follows a branch", but not a branch target.
+; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.)
+%macro REP_RET 0
+ %if has_epilogue
+ RET
+ %else
+ rep ret
+ %endif
+ annotate_function_size
+%endmacro
+
+%define last_branch_adr $$
+%macro AUTO_REP_RET 0
+ %if notcpuflag(ssse3)
+ times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ == last_branch_adr.
+ %endif
+ ret
+ annotate_function_size
+%endmacro
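+
+; e.g. when AUTO_REP_RET immediately follows a conditional jump (tracked via
+; the BRANCH_INSTR wrappers below), $ == last_branch_adr, the ``times''
+; expression yields 1 and a ``rep'' prefix is emitted, giving the 2-byte
+; ``rep ret''; otherwise a plain 1-byte ``ret'' is emitted.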
+
+%macro BRANCH_INSTR 0-*
+ %rep %0
+ %macro %1 1-2 %1
+ %2 %1
+ %if notcpuflag(ssse3)
+ %%branch_instr equ $
+ %xdefine last_branch_adr %%branch_instr
+ %endif
+ %endmacro
+ %rotate 1
+ %endrep
+%endmacro
+
+BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp
+
+%macro TAIL_CALL 2 ; callee, is_nonadjacent
+ %if has_epilogue
+ call %1
+ RET
+ %elif %2
+ jmp %1
+ %endif
+ annotate_function_size
+%endmacro
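+
+; Usage sketch (the callee name is illustrative): ``TAIL_CALL some_helper, 1''
+; expands to call+RET when the current function needs an epilogue and to a
+; plain ``jmp some_helper'' otherwise; with is_nonadjacent == 0 and no
+; epilogue, nothing is emitted and execution falls through into the callee.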
+
+;=============================================================================
+; arch-independent part
+;=============================================================================
+
+%assign function_align 16
+
+; Begin a function.
+; Applies any symbol mangling needed for C linkage, and sets up a define such that
+; subsequent uses of the function name automatically refer to the mangled version.
+; Appends cpuflags to the function name if cpuflags has been specified.
+; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX
+; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2).
+%macro cglobal 1-2+ "" ; name, [PROLOGUE args]
+ cglobal_internal 1, %1 %+ SUFFIX, %2
+%endmacro
+%macro cvisible 1-2+ "" ; name, [PROLOGUE args]
+ cglobal_internal 0, %1 %+ SUFFIX, %2
+%endmacro
+%macro cglobal_internal 2-3+
+ annotate_function_size
+ %if %1
+ %xdefine %%FUNCTION_PREFIX private_prefix
+ ; libaom explicitly sets visibility in shared object builds. Avoid
+ ; setting visibility to hidden as it may break builds that split
+        ; sources on, e.g., directory boundaries.
+ %ifdef CHROMIUM
+ %xdefine %%VISIBILITY hidden
+ %else
+ %xdefine %%VISIBILITY
+ %endif
+ %else
+ %xdefine %%FUNCTION_PREFIX public_prefix
+ %xdefine %%VISIBILITY
+ %endif
+ %ifndef cglobaled_%2
+ %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2)
+ %xdefine %2.skip_prologue %2 %+ .skip_prologue
+ CAT_XDEFINE cglobaled_, %2, 1
+ %endif
+ %xdefine current_function %2
+ %xdefine current_function_section __SECT__
+ %if FORMAT_ELF
+ global %2:function %%VISIBILITY
+ %elif FORMAT_MACHO
+ %ifdef __NASM_VER__
+ global %2
+ %else
+ global %2:private_extern
+ %endif
+ %else
+ global %2
+ %endif
+ align function_align
+ %2:
+ RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer
+ %xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required
+ %assign stack_offset 0 ; stack pointer offset relative to the return address
+ %assign stack_size 0 ; amount of stack space that can be freely used inside a function
+ %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding
+ %assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64
+ %ifnidn %3, ""
+ PROLOGUE %3
+ %endif
+%endmacro
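+
+; Usage sketch (function and argument names are hypothetical):
+;   INIT_XMM sse2
+;   cglobal add_vec, 3, 4, 2, dst, src, len
+; declares add_vec_sse2 (mangled for C linkage), loads the 3 named arguments
+; into r0..r2, marks 4 gprs as used, and requests 2 xmm registers, so no
+; WIN64 callee-saved xmm spills are needed.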
+
+%macro cextern 1
+ %xdefine %1 mangle(private_prefix %+ _ %+ %1)
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+; like cextern, but without the prefix
+%macro cextern_naked 1
+ %ifdef PREFIX
+ %xdefine %1 mangle(%1)
+ %endif
+ CAT_XDEFINE cglobaled_, %1, 1
+ extern %1
+%endmacro
+
+%macro const 1-2+
+ %xdefine %1 mangle(private_prefix %+ _ %+ %1)
+ %if FORMAT_ELF
+ global %1:data hidden
+ %else
+ global %1
+ %endif
+ %1: %2
+%endmacro
+
+; This is needed for ELF, otherwise the GNU linker assumes the stack is executable by default.
+%if FORMAT_ELF
+ [SECTION .note.GNU-stack noalloc noexec nowrite progbits]
+%endif
+
+; Tell debuggers how large the function was.
+; This may be invoked multiple times per function; we rely on later instances overriding earlier ones.
+; RET and similar macros invoke it, and cglobal also does so for the previous function,
+; but if the last function in a source file doesn't use any of the standard macros for its epilogue,
+; then its size might be unspecified.
+%macro annotate_function_size 0
+ %ifdef __YASM_VER__
+ %ifdef current_function
+ %if FORMAT_ELF
+ current_function_section
+ %%ecf equ $
+ size current_function %%ecf - current_function
+ __SECT__
+ %endif
+ %endif
+ %endif
+%endmacro
+
+; cpuflags
+
+%assign cpuflags_mmx (1<<0)
+%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx
+%assign cpuflags_3dnow (1<<2) | cpuflags_mmx
+%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow
+%assign cpuflags_sse (1<<4) | cpuflags_mmx2
+%assign cpuflags_sse2 (1<<5) | cpuflags_sse
+%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2
+%assign cpuflags_sse3 (1<<7) | cpuflags_sse2
+%assign cpuflags_ssse3 (1<<8) | cpuflags_sse3
+%assign cpuflags_sse4 (1<<9) | cpuflags_ssse3
+%assign cpuflags_sse42 (1<<10)| cpuflags_sse4
+%assign cpuflags_avx (1<<11)| cpuflags_sse42
+%assign cpuflags_xop (1<<12)| cpuflags_avx
+%assign cpuflags_fma4 (1<<13)| cpuflags_avx
+%assign cpuflags_fma3 (1<<14)| cpuflags_avx
+%assign cpuflags_avx2 (1<<15)| cpuflags_fma3
+
+%assign cpuflags_cache32 (1<<16)
+%assign cpuflags_cache64 (1<<17)
+%assign cpuflags_slowctz (1<<18)
+%assign cpuflags_lzcnt (1<<19)
+%assign cpuflags_aligned (1<<20) ; not a cpu feature, but a function variant
+%assign cpuflags_atom (1<<21)
+%assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt
+%assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1
+
+; Returns a boolean value expressing whether or not the specified cpuflag is enabled.
+%define cpuflag(x) (((((cpuflags & (cpuflags_ %+ x)) ^ (cpuflags_ %+ x)) - 1) >> 31) & 1)
+%define notcpuflag(x) (cpuflag(x) ^ 1)
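+
+; e.g. if cpuflags == cpuflags_sse2, then for cpuflag(sse) the masked bits
+; equal the mask, the xor gives 0, and 0-1 is negative, so the shifted sign
+; bit yields 1; for cpuflag(avx) some mask bits are missing, the xor is a
+; positive nonzero value, and the result is 0.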
+
+; Takes an arbitrary number of cpuflags from the above list.
+; All subsequent functions (up to the next INIT_CPUFLAGS) are built for the specified cpu.
+; You shouldn't need to invoke this macro directly; it's a subroutine for INIT_MMX & co.
+%macro INIT_CPUFLAGS 0-*
+ %xdefine SUFFIX
+ %undef cpuname
+ %assign cpuflags 0
+
+ %if %0 >= 1
+ %rep %0
+ %ifdef cpuname
+ %xdefine cpuname cpuname %+ _%1
+ %else
+ %xdefine cpuname %1
+ %endif
+ %assign cpuflags cpuflags | cpuflags_%1
+ %rotate 1
+ %endrep
+ %xdefine SUFFIX _ %+ cpuname
+
+ %if cpuflag(avx)
+ %assign avx_enabled 1
+ %endif
+ %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2))
+ %define mova movaps
+ %define movu movups
+ %define movnta movntps
+ %endif
+ %if cpuflag(aligned)
+ %define movu mova
+ %elif cpuflag(sse3) && notcpuflag(ssse3)
+ %define movu lddqu
+ %endif
+ %endif
+
+ %if ARCH_X86_64 || cpuflag(sse2)
+ %ifdef __NASM_VER__
+ ALIGNMODE k8
+ %else
+ CPU amdnop
+ %endif
+ %else
+ %ifdef __NASM_VER__
+ ALIGNMODE nop
+ %else
+ CPU basicnop
+ %endif
+ %endif
+%endmacro
+
+; Merge mmx and sse*
+; m# is a simd register of the currently selected size
+; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m#
+; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m#
+; (All 3 remain in sync through SWAP.)
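+;
+; e.g. after INIT_XMM, m0, xm0 and ym0 all resolve to xmm0; after INIT_YMM,
+; m0 and ym0 are ymm0 while xm0 is its xmm0 low half.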
+
+%macro CAT_XDEFINE 3
+ %xdefine %1%2 %3
+%endmacro
+
+%macro CAT_UNDEF 2
+ %undef %1%2
+%endmacro
+
+%macro INIT_MMX 0-1+
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_MMX %1
+ %define mmsize 8
+ %define num_mmregs 8
+ %define mova movq
+ %define movu movq
+ %define movh movd
+ %define movnta movntq
+ %assign %%i 0
+ %rep 8
+ CAT_XDEFINE m, %%i, mm %+ %%i
+ CAT_XDEFINE nnmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %rep 8
+ CAT_UNDEF m, %%i
+ CAT_UNDEF nnmm, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+%macro INIT_XMM 0-1+
+ %assign avx_enabled 0
+ %define RESET_MM_PERMUTATION INIT_XMM %1
+ %define mmsize 16
+ %define num_mmregs 8
+ %if ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova movdqa
+ %define movu movdqu
+ %define movh movq
+ %define movnta movntdq
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, xmm %+ %%i
+ CAT_XDEFINE nnxmm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+%macro INIT_YMM 0-1+
+ %assign avx_enabled 1
+ %define RESET_MM_PERMUTATION INIT_YMM %1
+ %define mmsize 32
+ %define num_mmregs 8
+ %if ARCH_X86_64
+ %define num_mmregs 16
+ %endif
+ %define mova movdqa
+ %define movu movdqu
+ %undef movh
+ %define movnta movntdq
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, ymm %+ %%i
+ CAT_XDEFINE nnymm, %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ INIT_CPUFLAGS %1
+%endmacro
+
+INIT_XMM
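+
+; e.g. ``INIT_XMM sse2'' selects 16-byte vectors and gives subsequent
+; functions the _sse2 suffix, while ``INIT_YMM avx2'' selects 32-byte
+; vectors and the _avx2 suffix.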
+
+%macro DECLARE_MMCAST 1
+ %define mmmm%1 mm%1
+ %define mmxmm%1 mm%1
+ %define mmymm%1 mm%1
+ %define xmmmm%1 mm%1
+ %define xmmxmm%1 xmm%1
+ %define xmmymm%1 xmm%1
+ %define ymmmm%1 mm%1
+ %define ymmxmm%1 xmm%1
+ %define ymmymm%1 ymm%1
+ %define xm%1 xmm %+ m%1
+ %define ym%1 ymm %+ m%1
+%endmacro
+
+%assign i 0
+%rep 16
+ DECLARE_MMCAST i
+ %assign i i+1
+%endrep
+
+; I often want to use macros that permute their arguments. For example, there's
+; no efficient way to implement butterfly, transpose or dct without swapping
+; some arguments.
+;
+; I would like to not have to manually keep track of the permutations:
+; If I insert a permutation in the middle of a function, it should automatically
+; change everything that follows. For more complex macros I may also have multiple
+; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations.
+;
+; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that
+; permutes its arguments. It's equivalent to exchanging the contents of the
+; registers, except that this way you exchange the register names instead, so it
+; doesn't cost any cycles.
+
+%macro PERMUTE 2-* ; takes a list of pairs to swap
+ %rep %0/2
+ %xdefine %%tmp%2 m%2
+ %rotate 2
+ %endrep
+ %rep %0/2
+ %xdefine m%1 %%tmp%2
+ CAT_XDEFINE nn, m%1, %1
+ %rotate 2
+ %endrep
+%endmacro
+
+%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs)
+ %ifnum %1 ; SWAP 0, 1, ...
+ SWAP_INTERNAL_NUM %1, %2
+ %else ; SWAP m0, m1, ...
+ SWAP_INTERNAL_NAME %1, %2
+ %endif
+%endmacro
+
+%macro SWAP_INTERNAL_NUM 2-*
+ %rep %0-1
+ %xdefine %%tmp m%1
+ %xdefine m%1 m%2
+ %xdefine m%2 %%tmp
+ CAT_XDEFINE nn, m%1, %1
+ CAT_XDEFINE nn, m%2, %2
+ %rotate 1
+ %endrep
+%endmacro
+
+%macro SWAP_INTERNAL_NAME 2-*
+ %xdefine %%args nn %+ %1
+ %rep %0-1
+ %xdefine %%args %%args, nn %+ %2
+ %rotate 1
+ %endrep
+ SWAP_INTERNAL_NUM %%args
+%endmacro
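+
+; Usage sketch: after ``SWAP 0, 1'', the name m0 refers to the register that
+; was previously named m1 and vice versa; no instructions are emitted, only
+; the register names are exchanged.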
+
+; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later
+; calls to that function will automatically load the permutation, so values can
+; be returned in mmregs.
+%macro SAVE_MM_PERMUTATION 0-1
+ %if %0
+ %xdefine %%f %1_m
+ %else
+ %xdefine %%f current_function %+ _m
+ %endif
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE %%f, %%i, m %+ %%i
+ %assign %%i %%i+1
+ %endrep
+%endmacro
+
+%macro LOAD_MM_PERMUTATION 1 ; name to load from
+ %ifdef %1_m0
+ %assign %%i 0
+ %rep num_mmregs
+ CAT_XDEFINE m, %%i, %1_m %+ %%i
+ CAT_XDEFINE nn, m %+ %%i, %%i
+ %assign %%i %%i+1
+ %endrep
+ %endif
+%endmacro
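+
+; e.g. (hypothetical function): if foo ends with ``SWAP 0, 2'' followed by
+; SAVE_MM_PERMUTATION, then ``call foo'' via the wrapper below reloads that
+; permutation, so the caller's m0 names the register foo left its result in.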
+
+; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't
+%macro call 1
+ call_internal %1 %+ SUFFIX, %1
+%endmacro
+%macro call_internal 2
+ %xdefine %%i %2
+ %ifndef cglobaled_%2
+ %ifdef cglobaled_%1
+ %xdefine %%i %1
+ %endif
+ %endif
+ call %%i
+ LOAD_MM_PERMUTATION %%i
+%endmacro
+
+; Substitutions that reduce instruction size but are functionally equivalent
+%macro add 2
+ %ifnum %2
+ %if %2==128
+ sub %1, -128
+ %else
+ add %1, %2
+ %endif
+ %else
+ add %1, %2
+ %endif
+%endmacro
+
+%macro sub 2
+ %ifnum %2
+ %if %2==128
+ add %1, -128
+ %else
+ sub %1, %2
+ %endif
+ %else
+ sub %1, %2
+ %endif
+%endmacro
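+
+; e.g. ``add rax, 128'' needs a 4-byte imm32 (the sign-extended imm8 form
+; tops out at 127), whereas ``sub rax, -128'' fits the 1-byte imm8 form,
+; saving 3 bytes while producing the same result.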
+
+;=============================================================================
+; AVX abstraction layer
+;=============================================================================
+
+%assign i 0
+%rep 16
+ %if i < 8
+ CAT_XDEFINE sizeofmm, i, 8
+ %endif
+ CAT_XDEFINE sizeofxmm, i, 16
+ CAT_XDEFINE sizeofymm, i, 32
+ %assign i i+1
+%endrep
+%undef i
+
+%macro CHECK_AVX_INSTR_EMU 3-*
+ %xdefine %%opcode %1
+ %xdefine %%dst %2
+ %rep %0-2
+ %ifidn %%dst, %3
+ %error non-avx emulation of ``%%opcode'' is not supported
+ %endif
+ %rotate 1
+ %endrep
+%endmacro
+
+;%1 == instruction
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+;%6+: operands
+%macro RUN_AVX_INSTR 6-9+
+ %ifnum sizeof%7
+ %assign __sizeofreg sizeof%7
+ %elifnum sizeof%6
+ %assign __sizeofreg sizeof%6
+ %else
+ %assign __sizeofreg mmsize
+ %endif
+ %assign __emulate_avx 0
+ %if avx_enabled && __sizeofreg >= 16
+ %xdefine __instr v%1
+ %else
+ %xdefine __instr %1
+ %if %0 >= 8+%4
+ %assign __emulate_avx 1
+ %endif
+ %endif
+ %ifnidn %2, fnord
+ %ifdef cpuname
+ %if notcpuflag(%2)
+ %error use of ``%1'' %2 instruction in cpuname function: current_function
+ %elif cpuflags_%2 < cpuflags_sse && notcpuflag(sse2) && __sizeofreg > 8
+ %error use of ``%1'' sse2 instruction in cpuname function: current_function
+ %endif
+ %endif
+ %endif
+
+ %if __emulate_avx
+ %xdefine __src1 %7
+ %xdefine __src2 %8
+ %ifnidn %6, %7
+ %if %0 >= 9
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8, %9}, %6, %8, %9
+ %else
+ CHECK_AVX_INSTR_EMU {%1 %6, %7, %8}, %6, %8
+ %endif
+ %if %5 && %4 == 0
+ %ifnid %8
+ ; 3-operand AVX instructions with a memory arg can only have it in src2,
+ ; whereas SSE emulation prefers to have it in src1 (i.e. the mov).
+ ; So, if the instruction is commutative with a memory arg, swap them.
+ %xdefine __src1 %8
+ %xdefine __src2 %7
+ %endif
+ %endif
+ %if __sizeofreg == 8
+ MOVQ %6, __src1
+ %elif %3
+ MOVAPS %6, __src1
+ %else
+ MOVDQA %6, __src1
+ %endif
+ %endif
+ %if %0 >= 9
+ %1 %6, __src2, %9
+ %else
+ %1 %6, __src2
+ %endif
+ %elif %0 >= 9
+ __instr %6, %7, %8, %9
+ %elif %0 == 8
+ __instr %6, %7, %8
+ %elif %0 == 7
+ __instr %6, %7
+ %else
+ __instr %6
+ %endif
+%endmacro
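+
+; e.g. under INIT_XMM sse2, ``paddw m0, m1, m2'' is emulated in effect as
+;   movdqa m0, m1
+;   paddw  m0, m2
+; while under INIT_XMM avx the same line assembles directly to
+; ``vpaddw xmm0, xmm1, xmm2''.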
+
+;%1 == instruction
+;%2 == minimal instruction set
+;%3 == 1 if float, 0 if int
+;%4 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise
+;%5 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not
+%macro AVX_INSTR 1-5 fnord, 0, 1, 0
+ %macro %1 1-10 fnord, fnord, fnord, fnord, %1, %2, %3, %4, %5
+ %ifidn %2, fnord
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1
+ %elifidn %3, fnord
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2
+ %elifidn %4, fnord
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3
+ %elifidn %5, fnord
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4
+ %else
+ RUN_AVX_INSTR %6, %7, %8, %9, %10, %1, %2, %3, %4, %5
+ %endif
+ %endmacro
+%endmacro
+
+; Instructions with both VEX and non-VEX encodings
+; Non-destructive instructions are written without parameters
+AVX_INSTR addpd, sse2, 1, 0, 1
+AVX_INSTR addps, sse, 1, 0, 1
+AVX_INSTR addsd, sse2, 1, 0, 1
+AVX_INSTR addss, sse, 1, 0, 1
+AVX_INSTR addsubpd, sse3, 1, 0, 0
+AVX_INSTR addsubps, sse3, 1, 0, 0
+AVX_INSTR aesdec, fnord, 0, 0, 0
+AVX_INSTR aesdeclast, fnord, 0, 0, 0
+AVX_INSTR aesenc, fnord, 0, 0, 0
+AVX_INSTR aesenclast, fnord, 0, 0, 0
+AVX_INSTR aesimc
+AVX_INSTR aeskeygenassist
+AVX_INSTR andnpd, sse2, 1, 0, 0
+AVX_INSTR andnps, sse, 1, 0, 0
+AVX_INSTR andpd, sse2, 1, 0, 1
+AVX_INSTR andps, sse, 1, 0, 1
+AVX_INSTR blendpd, sse4, 1, 0, 0
+AVX_INSTR blendps, sse4, 1, 0, 0
+AVX_INSTR blendvpd, sse4, 1, 0, 0
+AVX_INSTR blendvps, sse4, 1, 0, 0
+AVX_INSTR cmppd, sse2, 1, 1, 0
+AVX_INSTR cmpps, sse, 1, 1, 0
+AVX_INSTR cmpsd, sse2, 1, 1, 0
+AVX_INSTR cmpss, sse, 1, 1, 0
+AVX_INSTR comisd, sse2
+AVX_INSTR comiss, sse
+AVX_INSTR cvtdq2pd, sse2
+AVX_INSTR cvtdq2ps, sse2
+AVX_INSTR cvtpd2dq, sse2
+AVX_INSTR cvtpd2ps, sse2
+AVX_INSTR cvtps2dq, sse2
+AVX_INSTR cvtps2pd, sse2
+AVX_INSTR cvtsd2si, sse2
+AVX_INSTR cvtsd2ss, sse2
+AVX_INSTR cvtsi2sd, sse2
+AVX_INSTR cvtsi2ss, sse
+AVX_INSTR cvtss2sd, sse2
+AVX_INSTR cvtss2si, sse
+AVX_INSTR cvttpd2dq, sse2
+AVX_INSTR cvttps2dq, sse2
+AVX_INSTR cvttsd2si, sse2
+AVX_INSTR cvttss2si, sse
+AVX_INSTR divpd, sse2, 1, 0, 0
+AVX_INSTR divps, sse, 1, 0, 0
+AVX_INSTR divsd, sse2, 1, 0, 0
+AVX_INSTR divss, sse, 1, 0, 0
+AVX_INSTR dppd, sse4, 1, 1, 0
+AVX_INSTR dpps, sse4, 1, 1, 0
+AVX_INSTR extractps, sse4
+AVX_INSTR haddpd, sse3, 1, 0, 0
+AVX_INSTR haddps, sse3, 1, 0, 0
+AVX_INSTR hsubpd, sse3, 1, 0, 0
+AVX_INSTR hsubps, sse3, 1, 0, 0
+AVX_INSTR insertps, sse4, 1, 1, 0
+AVX_INSTR lddqu, sse3
+AVX_INSTR ldmxcsr, sse
+AVX_INSTR maskmovdqu, sse2
+AVX_INSTR maxpd, sse2, 1, 0, 1
+AVX_INSTR maxps, sse, 1, 0, 1
+AVX_INSTR maxsd, sse2, 1, 0, 1
+AVX_INSTR maxss, sse, 1, 0, 1
+AVX_INSTR minpd, sse2, 1, 0, 1
+AVX_INSTR minps, sse, 1, 0, 1
+AVX_INSTR minsd, sse2, 1, 0, 1
+AVX_INSTR minss, sse, 1, 0, 1
+AVX_INSTR movapd, sse2
+AVX_INSTR movaps, sse
+AVX_INSTR movd, mmx
+AVX_INSTR movddup, sse3
+AVX_INSTR movdqa, sse2
+AVX_INSTR movdqu, sse2
+AVX_INSTR movhlps, sse, 1, 0, 0
+AVX_INSTR movhpd, sse2, 1, 0, 0
+AVX_INSTR movhps, sse, 1, 0, 0
+AVX_INSTR movlhps, sse, 1, 0, 0
+AVX_INSTR movlpd, sse2, 1, 0, 0
+AVX_INSTR movlps, sse, 1, 0, 0
+AVX_INSTR movmskpd, sse2
+AVX_INSTR movmskps, sse
+AVX_INSTR movntdq, sse2
+AVX_INSTR movntdqa, sse4
+AVX_INSTR movntpd, sse2
+AVX_INSTR movntps, sse
+AVX_INSTR movq, mmx
+AVX_INSTR movsd, sse2, 1, 0, 0
+AVX_INSTR movshdup, sse3
+AVX_INSTR movsldup, sse3
+AVX_INSTR movss, sse, 1, 0, 0
+AVX_INSTR movupd, sse2
+AVX_INSTR movups, sse
+AVX_INSTR mpsadbw, sse4
+AVX_INSTR mulpd, sse2, 1, 0, 1
+AVX_INSTR mulps, sse, 1, 0, 1
+AVX_INSTR mulsd, sse2, 1, 0, 1
+AVX_INSTR mulss, sse, 1, 0, 1
+AVX_INSTR orpd, sse2, 1, 0, 1
+AVX_INSTR orps, sse, 1, 0, 1
+AVX_INSTR pabsb, ssse3
+AVX_INSTR pabsd, ssse3
+AVX_INSTR pabsw, ssse3
+AVX_INSTR packsswb, mmx, 0, 0, 0
+AVX_INSTR packssdw, mmx, 0, 0, 0
+AVX_INSTR packuswb, mmx, 0, 0, 0
+AVX_INSTR packusdw, sse4, 0, 0, 0
+AVX_INSTR paddb, mmx, 0, 0, 1
+AVX_INSTR paddw, mmx, 0, 0, 1
+AVX_INSTR paddd, mmx, 0, 0, 1
+AVX_INSTR paddq, sse2, 0, 0, 1
+AVX_INSTR paddsb, mmx, 0, 0, 1
+AVX_INSTR paddsw, mmx, 0, 0, 1
+AVX_INSTR paddusb, mmx, 0, 0, 1
+AVX_INSTR paddusw, mmx, 0, 0, 1
+AVX_INSTR palignr, ssse3
+AVX_INSTR pand, mmx, 0, 0, 1
+AVX_INSTR pandn, mmx, 0, 0, 0
+AVX_INSTR pavgb, mmx2, 0, 0, 1
+AVX_INSTR pavgw, mmx2, 0, 0, 1
+AVX_INSTR pblendvb, sse4, 0, 0, 0
+AVX_INSTR pblendw, sse4
+AVX_INSTR pclmulqdq
+AVX_INSTR pcmpestri, sse42
+AVX_INSTR pcmpestrm, sse42
+AVX_INSTR pcmpistri, sse42
+AVX_INSTR pcmpistrm, sse42
+AVX_INSTR pcmpeqb, mmx, 0, 0, 1
+AVX_INSTR pcmpeqw, mmx, 0, 0, 1
+AVX_INSTR pcmpeqd, mmx, 0, 0, 1
+AVX_INSTR pcmpeqq, sse4, 0, 0, 1
+AVX_INSTR pcmpgtb, mmx, 0, 0, 0
+AVX_INSTR pcmpgtw, mmx, 0, 0, 0
+AVX_INSTR pcmpgtd, mmx, 0, 0, 0
+AVX_INSTR pcmpgtq, sse42, 0, 0, 0
+AVX_INSTR pextrb, sse4
+AVX_INSTR pextrd, sse4
+AVX_INSTR pextrq, sse4
+AVX_INSTR pextrw, mmx2
+AVX_INSTR phaddw, ssse3, 0, 0, 0
+AVX_INSTR phaddd, ssse3, 0, 0, 0
+AVX_INSTR phaddsw, ssse3, 0, 0, 0
+AVX_INSTR phminposuw, sse4
+AVX_INSTR phsubw, ssse3, 0, 0, 0
+AVX_INSTR phsubd, ssse3, 0, 0, 0
+AVX_INSTR phsubsw, ssse3, 0, 0, 0
+AVX_INSTR pinsrb, sse4
+AVX_INSTR pinsrd, sse4
+AVX_INSTR pinsrq, sse4
+AVX_INSTR pinsrw, mmx2
+AVX_INSTR pmaddwd, mmx, 0, 0, 1
+AVX_INSTR pmaddubsw, ssse3, 0, 0, 0
+AVX_INSTR pmaxsb, sse4, 0, 0, 1
+AVX_INSTR pmaxsw, mmx2, 0, 0, 1
+AVX_INSTR pmaxsd, sse4, 0, 0, 1
+AVX_INSTR pmaxub, mmx2, 0, 0, 1
+AVX_INSTR pmaxuw, sse4, 0, 0, 1
+AVX_INSTR pmaxud, sse4, 0, 0, 1
+AVX_INSTR pminsb, sse4, 0, 0, 1
+AVX_INSTR pminsw, mmx2, 0, 0, 1
+AVX_INSTR pminsd, sse4, 0, 0, 1
+AVX_INSTR pminub, mmx2, 0, 0, 1
+AVX_INSTR pminuw, sse4, 0, 0, 1
+AVX_INSTR pminud, sse4, 0, 0, 1
+AVX_INSTR pmovmskb, mmx2
+AVX_INSTR pmovsxbw, sse4
+AVX_INSTR pmovsxbd, sse4
+AVX_INSTR pmovsxbq, sse4
+AVX_INSTR pmovsxwd, sse4
+AVX_INSTR pmovsxwq, sse4
+AVX_INSTR pmovsxdq, sse4
+AVX_INSTR pmovzxbw, sse4
+AVX_INSTR pmovzxbd, sse4
+AVX_INSTR pmovzxbq, sse4
+AVX_INSTR pmovzxwd, sse4
+AVX_INSTR pmovzxwq, sse4
+AVX_INSTR pmovzxdq, sse4
+AVX_INSTR pmuldq, sse4, 0, 0, 1
+AVX_INSTR pmulhrsw, ssse3, 0, 0, 1
+AVX_INSTR pmulhuw, mmx2, 0, 0, 1
+AVX_INSTR pmulhw, mmx, 0, 0, 1
+AVX_INSTR pmullw, mmx, 0, 0, 1
+AVX_INSTR pmulld, sse4, 0, 0, 1
+AVX_INSTR pmuludq, sse2, 0, 0, 1
+AVX_INSTR por, mmx, 0, 0, 1
+AVX_INSTR psadbw, mmx2, 0, 0, 1
+AVX_INSTR pshufb, ssse3, 0, 0, 0
+AVX_INSTR pshufd, sse2
+AVX_INSTR pshufhw, sse2
+AVX_INSTR pshuflw, sse2
+AVX_INSTR psignb, ssse3, 0, 0, 0
+AVX_INSTR psignw, ssse3, 0, 0, 0
+AVX_INSTR psignd, ssse3, 0, 0, 0
+AVX_INSTR psllw, mmx, 0, 0, 0
+AVX_INSTR pslld, mmx, 0, 0, 0
+AVX_INSTR psllq, mmx, 0, 0, 0
+AVX_INSTR pslldq, sse2, 0, 0, 0
+AVX_INSTR psraw, mmx, 0, 0, 0
+AVX_INSTR psrad, mmx, 0, 0, 0
+AVX_INSTR psrlw, mmx, 0, 0, 0
+AVX_INSTR psrld, mmx, 0, 0, 0
+AVX_INSTR psrlq, mmx, 0, 0, 0
+AVX_INSTR psrldq, sse2, 0, 0, 0
+AVX_INSTR psubb, mmx, 0, 0, 0
+AVX_INSTR psubw, mmx, 0, 0, 0
+AVX_INSTR psubd, mmx, 0, 0, 0
+AVX_INSTR psubq, sse2, 0, 0, 0
+AVX_INSTR psubsb, mmx, 0, 0, 0
+AVX_INSTR psubsw, mmx, 0, 0, 0
+AVX_INSTR psubusb, mmx, 0, 0, 0
+AVX_INSTR psubusw, mmx, 0, 0, 0
+AVX_INSTR ptest, sse4
+AVX_INSTR punpckhbw, mmx, 0, 0, 0
+AVX_INSTR punpckhwd, mmx, 0, 0, 0
+AVX_INSTR punpckhdq, mmx, 0, 0, 0
+AVX_INSTR punpckhqdq, sse2, 0, 0, 0
+AVX_INSTR punpcklbw, mmx, 0, 0, 0
+AVX_INSTR punpcklwd, mmx, 0, 0, 0
+AVX_INSTR punpckldq, mmx, 0, 0, 0
+AVX_INSTR punpcklqdq, sse2, 0, 0, 0
+AVX_INSTR pxor, mmx, 0, 0, 1
+AVX_INSTR rcpps, sse, 1, 0, 0
+AVX_INSTR rcpss, sse, 1, 0, 0
+AVX_INSTR roundpd, sse4
+AVX_INSTR roundps, sse4
+AVX_INSTR roundsd, sse4
+AVX_INSTR roundss, sse4
+AVX_INSTR rsqrtps, sse, 1, 0, 0
+AVX_INSTR rsqrtss, sse, 1, 0, 0
+AVX_INSTR shufpd, sse2, 1, 1, 0
+AVX_INSTR shufps, sse, 1, 1, 0
+AVX_INSTR sqrtpd, sse2, 1, 0, 0
+AVX_INSTR sqrtps, sse, 1, 0, 0
+AVX_INSTR sqrtsd, sse2, 1, 0, 0
+AVX_INSTR sqrtss, sse, 1, 0, 0
+AVX_INSTR stmxcsr, sse
+AVX_INSTR subpd, sse2, 1, 0, 0
+AVX_INSTR subps, sse, 1, 0, 0
+AVX_INSTR subsd, sse2, 1, 0, 0
+AVX_INSTR subss, sse, 1, 0, 0
+AVX_INSTR ucomisd, sse2
+AVX_INSTR ucomiss, sse
+AVX_INSTR unpckhpd, sse2, 1, 0, 0
+AVX_INSTR unpckhps, sse, 1, 0, 0
+AVX_INSTR unpcklpd, sse2, 1, 0, 0
+AVX_INSTR unpcklps, sse, 1, 0, 0
+AVX_INSTR xorpd, sse2, 1, 0, 1
+AVX_INSTR xorps, sse, 1, 0, 1
+
+; 3DNow instructions, for sharing code between AVX, SSE and 3DNow
+AVX_INSTR pfadd, 3dnow, 1, 0, 1
+AVX_INSTR pfsub, 3dnow, 1, 0, 0
+AVX_INSTR pfmul, 3dnow, 1, 0, 1
+
+; base-4 constants for shuffles
+%assign i 0
+%rep 256
+ %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3)
+ %if j < 10
+ CAT_XDEFINE q000, j, i
+ %elif j < 100
+ CAT_XDEFINE q00, j, i
+ %elif j < 1000
+ CAT_XDEFINE q0, j, i
+ %else
+ CAT_XDEFINE q, j, i
+ %endif
+ %assign i i+1
+%endrep
+%undef i
+%undef j
+
+%macro FMA_INSTR 3
+ %macro %1 4-7 %1, %2, %3
+ %if cpuflag(xop)
+ v%5 %1, %2, %3, %4
+ %elifnidn %1, %4
+ %6 %1, %2, %3
+ %7 %1, %4
+ %else
+ %error non-xop emulation of ``%5 %1, %2, %3, %4'' is not supported
+ %endif
+ %endmacro
+%endmacro
+
+FMA_INSTR pmacsww, pmullw, paddw
+FMA_INSTR pmacsdd, pmulld, paddd ; sse4 emulation
+FMA_INSTR pmacsdql, pmuldq, paddq ; sse4 emulation
+FMA_INSTR pmadcswd, pmaddwd, paddd
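+
+; e.g. ``pmacsww m0, m1, m2, m3'' uses XOP's vpmacsww when available and is
+; otherwise emulated (requiring m0 != m3) as ``pmullw m0, m1, m2'' followed
+; by ``paddw m0, m3''.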
+
+; Macros for consolidating FMA3 and FMA4 using 4-operand (dst, src1, src2, src3) syntax.
+; FMA3 is only possible if dst is the same as one of the src registers.
+; Either src2 or src3 can be a memory operand.
+%macro FMA4_INSTR 2-*
+ %push fma4_instr
+ %xdefine %$prefix %1
+ %rep %0 - 1
+ %macro %$prefix%2 4-6 %$prefix, %2
+ %if notcpuflag(fma3) && notcpuflag(fma4)
+ %error use of ``%5%6'' fma instruction in cpuname function: current_function
+ %elif cpuflag(fma4)
+ v%5%6 %1, %2, %3, %4
+ %elifidn %1, %2
+ ; If %3 or %4 is a memory operand it needs to be encoded as the last operand.
+ %ifid %3
+ v%{5}213%6 %2, %3, %4
+ %else
+ v%{5}132%6 %2, %4, %3
+ %endif
+ %elifidn %1, %3
+ v%{5}213%6 %3, %2, %4
+ %elifidn %1, %4
+ v%{5}231%6 %4, %2, %3
+ %else
+ %error fma3 emulation of ``%5%6 %1, %2, %3, %4'' is not supported
+ %endif
+ %endmacro
+ %rotate 1
+ %endrep
+ %pop
+%endmacro
+
+FMA4_INSTR fmadd, pd, ps, sd, ss
+FMA4_INSTR fmaddsub, pd, ps
+FMA4_INSTR fmsub, pd, ps, sd, ss
+FMA4_INSTR fmsubadd, pd, ps
+FMA4_INSTR fnmadd, pd, ps, sd, ss
+FMA4_INSTR fnmsub, pd, ps, sd, ss
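+
+; e.g. ``fmaddps m0, m0, m1, m2'' (dst aliasing src1) emits vfmaddps under
+; FMA4 and vfmadd213ps m0, m1, m2 under FMA3, both computing m0 = m0*m1 + m2.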
+
+; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug (fixed in 1.3.0)
+%ifdef __YASM_VER__
+ %if __YASM_VERSION_ID__ < 0x01030000 && ARCH_X86_64 == 0
+ %macro vpbroadcastq 2
+ %if sizeof%1 == 16
+ movddup %1, %2
+ %else
+ vbroadcastsd %1, %2
+ %endif
+ %endmacro
+ %endif
+%endif